github.com/hikaru7719/go@v0.0.0-20181025140707-c8b2ac68906a/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v) || rewriteValueAMD64_OpAMD64ADDLconst_10(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload_0(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v) || rewriteValueAMD64_OpAMD64ADDQconst_10(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload_0(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload_0(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload_0(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst_0(v)
	case OpAMD64BTCLconstmodify:
		return rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v)
	case OpAMD64BTCLmodify:
		return rewriteValueAMD64_OpAMD64BTCLmodify_0(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst_0(v)
	case OpAMD64BTCQconstmodify:
		return rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v)
	case OpAMD64BTCQmodify:
		return rewriteValueAMD64_OpAMD64BTCQmodify_0(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst_0(v)
	case OpAMD64BTRLconstmodify:
		return rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v)
	case OpAMD64BTRLmodify:
		return rewriteValueAMD64_OpAMD64BTRLmodify_0(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst_0(v)
	case OpAMD64BTRQconstmodify:
		return rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v)
	case OpAMD64BTRQmodify:
		return rewriteValueAMD64_OpAMD64BTRQmodify_0(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst_0(v)
	case OpAMD64BTSLconstmodify:
		return rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v)
	case OpAMD64BTSLmodify:
		return rewriteValueAMD64_OpAMD64BTSLmodify_0(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst_0(v)
	case OpAMD64BTSQconstmodify:
		return rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v)
	case OpAMD64BTSQmodify:
		return rewriteValueAMD64_OpAMD64BTSQmodify_0(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS_0(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ_0(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE_0(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT_0(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI_0(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE_0(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS_0(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT_0(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE_0(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC_0(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE_0(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT_0(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI_0(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE_0(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS_0(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT_0(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE_0(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC_0(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS_0(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ_0(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE_0(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT_0(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI_0(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE_0(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS_0(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT_0(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload_0(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v) || rewriteValueAMD64_OpAMD64CMPLconst_10(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload_0(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload_0(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload_0(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload_0(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD_0(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload_0(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS_0(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload_0(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL_0(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU_0(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ_0(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1_0(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2_0(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4_0(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) || rewriteValueAMD64_OpAMD64MOVBstore_30(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v) || rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v) || rewriteValueAMD64_OpAMD64MOVLload_10(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v) || rewriteValueAMD64_OpAMD64MOVLstore_20(v) || rewriteValueAMD64_OpAMD64MOVLstore_30(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v) || rewriteValueAMD64_OpAMD64MULLconst_10(v) || rewriteValueAMD64_OpAMD64MULLconst_20(v) || rewriteValueAMD64_OpAMD64MULLconst_30(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v) || rewriteValueAMD64_OpAMD64MULQconst_30(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify_0(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload_0(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify_0(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload_0(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore_0(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore_0(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) || rewriteValueAMD64_OpAMD64SETEQ_20(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore_0(v) || rewriteValueAMD64_OpAMD64SETEQstore_10(v) || rewriteValueAMD64_OpAMD64SETEQstore_20(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore_0(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore_0(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) || rewriteValueAMD64_OpAMD64SETNE_20(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore_0(v) || rewriteValueAMD64_OpAMD64SETNEstore_10(v) || rewriteValueAMD64_OpAMD64SETNEstore_20(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload_0(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload_0(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify_0(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload_0(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify_0(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload_0(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpCeil:
		return rewriteValueAMD64_OpCeil_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect_0(v) || rewriteValueAMD64_OpCondSelect_10(v) || rewriteValueAMD64_OpCondSelect_20(v) || rewriteValueAMD64_OpCondSelect_30(v) || rewriteValueAMD64_OpCondSelect_40(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16_0(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero_0(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8_0(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRotateLeft16:
		return rewriteValueAMD64_OpRotateLeft16_0(v)
	case OpRotateLeft32:
		return rewriteValueAMD64_OpRotateLeft32_0(v)
	case OpRotateLeft64:
		return rewriteValueAMD64_OpRotateLeft64_0(v)
	case OpRotateLeft8:
		return rewriteValueAMD64_OpRotateLeft8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpWB:
		return rewriteValueAMD64_OpWB_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (SHLLconst [3] y))
	// cond:
	// result: (LEAL8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [3] y) x)
	// cond:
	// result: (LEAL8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x (SHLLconst [2] y))
	// cond:
	// result: (LEAL4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [2] y) x)
	// cond:
	// result: (LEAL4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (SHLLconst [1] y))
	// cond:
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
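		// Note (hand-added; the rest of this file is generated): v_1.AuxInt
		// holds the constant shift amount, so the check below accepts only a
		// shift by 1. x + (y<<1) is x + y*2, which fits the LEAL2 addressing
		// form (base + 2*index) and so folds into a single LEA instruction.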
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [1] y) x)
	// cond:
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (ADDL y y))
	// cond:
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (ADDL y y) x)
	// cond:
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (ADDL x y))
	// cond:
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (ADDL y x))
	// cond:
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL (ADDL x y) x)
	// cond:
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL (ADDL y x) x)
	// cond:
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool {
	// match: (ADDL (ADDLconst [c] x) y)
	// cond:
	// result: (LEAL1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL y (ADDLconst [c] x))
	// cond:
	// result: (LEAL1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
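	// Note (hand-added; the rest of this file is generated): each
	// match/cond/result comment in this file is a rewrite rule taken from
	// gen/AMD64.rules, and each for-block below such a comment is the
	// generator's expansion of that rule into explicit matching code. As a
	// sketch, assuming the usual "pattern && condition -> result" rule
	// syntax, the next rule would appear in gen/AMD64.rules roughly as:
	//
	//	(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)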
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAL {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (LEAL [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] (ADDL x y))
	// cond:
	// result: (LEAL1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// cond:
	// result: (LEAL1 [c] x x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(x)
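		// Note (hand-added; the rest of this file is generated): this rule
		// relies on the identity x<<1 == x+x, which is why the single
		// operand x was added twice above, serving as both the base and the
		// index of the resulting LEAL1.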
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_10(v *Value) bool {
	// match: (ADDLconst [off] x:(SP))
	// cond:
	// result: (LEAL [off] x)
	for {
		off := v.AuxInt
		x := v.Args[0]
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
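// Editor's note (illustrative comment, not produced by the generator): the
// ADDLconst rules above fold a constant add into the displacement of an
// existing LEAL/LEAL1..LEAL8, as long as is32Bit(c+d) keeps the sum encodable.
// With hypothetical values c=8 and d=16, (ADDLconst [8] (LEAL [16] {s} x))
// becomes (LEAL [24] {s} x): one LEA instruction instead of LEA+ADD.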
func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool {
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool {
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
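// Editor's note (illustrative comment, not produced by the generator): the
// ADDQ rules above turn adds of shifted or doubled values into scaled-index
// LEAs. (ADDQ x (SHLQconst [3] y)) is x + (y<<3) = x + 8*y, which LEAQ8
// computes in one instruction; likewise (ADDQ x (ADDQ y y)) is x + 2*y,
// hence LEAQ2.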
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// cond:
	// result: (LEAQ1 [c] x x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_10(v *Value) bool {
	// match: (ADDQconst [off] x:(SP))
	// cond:
	// result: (LEAQ [off] x)
	for {
		off := v.AuxInt
		x := v.Args[0]
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
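// Editor's note (illustrative comment, not produced by the generator): the
// recurring is32Bit(c+d) guard exists because AMD64 immediates and address
// displacements are sign-extended 32-bit fields. Folding two constants is
// only legal when the sum still fits; e.g. two ADDQconsts of 0x7FFFFFFF each
// must stay separate, whereas (ADDQconst [c] (MOVQconst [d])) always folds,
// since the result is just a wider constant.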
func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool {
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool {
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
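// Editor's note (illustrative comment, not produced by the generator): the
// final ADDSDload rule above is store-to-load forwarding across register
// files: when the loaded slot was just written by a MOVQstore to the same
// [off] {sym} ptr, the memory round trip is dropped and the stored GP value
// y is moved directly into an XMM register via MOVQi2f.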
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// cond: !config.nacl
	// result: (BTRL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NOTL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		y := v_0_0.Args[1]
		x := v.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDL x (NOTL (SHLL (MOVLconst [1]) y)))
	// cond: !config.nacl
	// result: (BTRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NOTL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		y := v_1_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDL x (MOVLconst [c]))
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ANDLconst [c] x)
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (BTRLconst [d] x))
	// cond:
	// result: (ANDLconst [c &^ (1<<uint32(d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c &^ (1 << uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [ 0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
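// Editor's note (illustrative comment, not produced by the generator): ANDing
// with the complement of a single bit is a bit-clear. (ANDL (NOTL (SHLL
// (MOVLconst [1]) y)) x) computes x &^ (1 << y), which BTRL does directly.
// For a constant mask such as c = ^(1<<20), isUint32PowerOfTwo(^c) holds and
// the rules above emit BTRLconst [20], while the 0xFF and 0xFFFF masks become
// the zero-extending moves MOVBQZX and MOVWQZX instead.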
func rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v *Value) bool {
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLmodify_0(v *Value) bool {
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// cond: !config.nacl
	// result: (BTRQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NOTQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		y := v_0_0.Args[1]
		x := v.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDQ x (NOTQ (SHLQ (MOVQconst [1]) y)))
	// cond: !config.nacl
	// result: (BTRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NOTQ {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		y := v_1_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRQconst [log2(^c)] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRQconst [log2(^c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
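// Editor's note (illustrative comment, not produced by the generator): unlike
// ANDL, ANDQ with a MOVQconst only becomes ANDQconst under is32Bit(c),
// because x86-64 has no AND with a 64-bit immediate. A mask that clears one
// high bit, e.g. ^(1<<40), fails is32Bit but is still caught by the
// BTRQconst rules above.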
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ANDQconst [c] x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
	// result: (BTRQconst [log2(^c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (BTRQconst [d] x))
	// cond:
	// result: (ANDQconst [c &^ (1<<uint32(d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c &^ (1 << uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [ 0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v *Value) bool {
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmodify_0(v *Value) bool {
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
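// Editor's note (illustrative comment, not produced by the generator): the
// paired (ADDQconst [off2] base) / (LEAQ [off2] {sym2} base) rules repeated
// for every load, modify and constmodify op above are one template: fold a
// pointer adjustment into the instruction's addressing mode. ADDQconst merges
// into the offset when the sum stays 32-bit; LEAQ additionally merges its
// symbol through mergeSym when canMergeSym(sym1, sym2) allows it.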
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
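// Editor's note (illustrative comment, not produced by the generator): BSF
// leaves its destination undefined for a zero source, so code computing an
// 8- or 16-bit count-trailing-zeros ORs a guard bit (1<<8 or 1<<16) in above
// the operand. The BSFQ rules above then drop a now-redundant MOVBQZX or
// MOVWQZX: the guard bit caps the result, so any high bits the zero extension
// would have cleared can never be selected.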
:= v.Args[1] 4608 mem := v.Args[2] 4609 if !(is32Bit(off1 + off2)) { 4610 break 4611 } 4612 v.reset(OpAMD64BTCLmodify) 4613 v.AuxInt = off1 + off2 4614 v.Aux = sym 4615 v.AddArg(base) 4616 v.AddArg(val) 4617 v.AddArg(mem) 4618 return true 4619 } 4620 // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 4621 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4622 // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4623 for { 4624 off1 := v.AuxInt 4625 sym1 := v.Aux 4626 _ = v.Args[2] 4627 v_0 := v.Args[0] 4628 if v_0.Op != OpAMD64LEAQ { 4629 break 4630 } 4631 off2 := v_0.AuxInt 4632 sym2 := v_0.Aux 4633 base := v_0.Args[0] 4634 val := v.Args[1] 4635 mem := v.Args[2] 4636 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4637 break 4638 } 4639 v.reset(OpAMD64BTCLmodify) 4640 v.AuxInt = off1 + off2 4641 v.Aux = mergeSym(sym1, sym2) 4642 v.AddArg(base) 4643 v.AddArg(val) 4644 v.AddArg(mem) 4645 return true 4646 } 4647 return false 4648 } 4649 func rewriteValueAMD64_OpAMD64BTCQconst_0(v *Value) bool { 4650 // match: (BTCQconst [c] (XORQconst [d] x)) 4651 // cond: 4652 // result: (XORQconst [d ^ 1<<uint32(c)] x) 4653 for { 4654 c := v.AuxInt 4655 v_0 := v.Args[0] 4656 if v_0.Op != OpAMD64XORQconst { 4657 break 4658 } 4659 d := v_0.AuxInt 4660 x := v_0.Args[0] 4661 v.reset(OpAMD64XORQconst) 4662 v.AuxInt = d ^ 1<<uint32(c) 4663 v.AddArg(x) 4664 return true 4665 } 4666 // match: (BTCQconst [c] (BTCQconst [d] x)) 4667 // cond: 4668 // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x) 4669 for { 4670 c := v.AuxInt 4671 v_0 := v.Args[0] 4672 if v_0.Op != OpAMD64BTCQconst { 4673 break 4674 } 4675 d := v_0.AuxInt 4676 x := v_0.Args[0] 4677 v.reset(OpAMD64XORQconst) 4678 v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d) 4679 v.AddArg(x) 4680 return true 4681 } 4682 // match: (BTCQconst [c] (MOVQconst [d])) 4683 // cond: 4684 // result: (MOVQconst [d^(1<<uint32(c))]) 4685 for { 4686 c := v.AuxInt 4687 v_0 := v.Args[0] 4688 if v_0.Op != OpAMD64MOVQconst { 4689 break 4690 } 4691 d := v_0.AuxInt 4692 v.reset(OpAMD64MOVQconst) 4693 v.AuxInt = d ^ (1 << uint32(c)) 4694 return true 4695 } 4696 return false 4697 } 4698 func rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v *Value) bool { 4699 // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 4700 // cond: ValAndOff(valoff1).canAdd(off2) 4701 // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 4702 for { 4703 valoff1 := v.AuxInt 4704 sym := v.Aux 4705 _ = v.Args[1] 4706 v_0 := v.Args[0] 4707 if v_0.Op != OpAMD64ADDQconst { 4708 break 4709 } 4710 off2 := v_0.AuxInt 4711 base := v_0.Args[0] 4712 mem := v.Args[1] 4713 if !(ValAndOff(valoff1).canAdd(off2)) { 4714 break 4715 } 4716 v.reset(OpAMD64BTCQconstmodify) 4717 v.AuxInt = ValAndOff(valoff1).add(off2) 4718 v.Aux = sym 4719 v.AddArg(base) 4720 v.AddArg(mem) 4721 return true 4722 } 4723 // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 4724 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 4725 // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 4726 for { 4727 valoff1 := v.AuxInt 4728 sym1 := v.Aux 4729 _ = v.Args[1] 4730 v_0 := v.Args[0] 4731 if v_0.Op != OpAMD64LEAQ { 4732 break 4733 } 4734 off2 := v_0.AuxInt 4735 sym2 := v_0.Aux 4736 base := v_0.Args[0] 4737 mem := v.Args[1] 4738 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 4739 break 4740 } 4741 v.reset(OpAMD64BTCQconstmodify) 4742 v.AuxInt = ValAndOff(valoff1).add(off2) 4743 v.Aux 
= mergeSym(sym1, sym2) 4744 v.AddArg(base) 4745 v.AddArg(mem) 4746 return true 4747 } 4748 return false 4749 } 4750 func rewriteValueAMD64_OpAMD64BTCQmodify_0(v *Value) bool { 4751 // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 4752 // cond: is32Bit(off1+off2) 4753 // result: (BTCQmodify [off1+off2] {sym} base val mem) 4754 for { 4755 off1 := v.AuxInt 4756 sym := v.Aux 4757 _ = v.Args[2] 4758 v_0 := v.Args[0] 4759 if v_0.Op != OpAMD64ADDQconst { 4760 break 4761 } 4762 off2 := v_0.AuxInt 4763 base := v_0.Args[0] 4764 val := v.Args[1] 4765 mem := v.Args[2] 4766 if !(is32Bit(off1 + off2)) { 4767 break 4768 } 4769 v.reset(OpAMD64BTCQmodify) 4770 v.AuxInt = off1 + off2 4771 v.Aux = sym 4772 v.AddArg(base) 4773 v.AddArg(val) 4774 v.AddArg(mem) 4775 return true 4776 } 4777 // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 4778 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4779 // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4780 for { 4781 off1 := v.AuxInt 4782 sym1 := v.Aux 4783 _ = v.Args[2] 4784 v_0 := v.Args[0] 4785 if v_0.Op != OpAMD64LEAQ { 4786 break 4787 } 4788 off2 := v_0.AuxInt 4789 sym2 := v_0.Aux 4790 base := v_0.Args[0] 4791 val := v.Args[1] 4792 mem := v.Args[2] 4793 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4794 break 4795 } 4796 v.reset(OpAMD64BTCQmodify) 4797 v.AuxInt = off1 + off2 4798 v.Aux = mergeSym(sym1, sym2) 4799 v.AddArg(base) 4800 v.AddArg(val) 4801 v.AddArg(mem) 4802 return true 4803 } 4804 return false 4805 } 4806 func rewriteValueAMD64_OpAMD64BTLconst_0(v *Value) bool { 4807 // match: (BTLconst [c] (SHRQconst [d] x)) 4808 // cond: (c+d)<64 4809 // result: (BTQconst [c+d] x) 4810 for { 4811 c := v.AuxInt 4812 v_0 := v.Args[0] 4813 if v_0.Op != OpAMD64SHRQconst { 4814 break 4815 } 4816 d := v_0.AuxInt 4817 x := v_0.Args[0] 4818 if !((c + d) < 64) { 4819 break 4820 } 4821 v.reset(OpAMD64BTQconst) 4822 v.AuxInt = c + d 4823 v.AddArg(x) 4824 return true 4825 } 4826 // match: (BTLconst [c] (SHLQconst [d] x)) 4827 // cond: c>d 4828 // result: (BTLconst [c-d] x) 4829 for { 4830 c := v.AuxInt 4831 v_0 := v.Args[0] 4832 if v_0.Op != OpAMD64SHLQconst { 4833 break 4834 } 4835 d := v_0.AuxInt 4836 x := v_0.Args[0] 4837 if !(c > d) { 4838 break 4839 } 4840 v.reset(OpAMD64BTLconst) 4841 v.AuxInt = c - d 4842 v.AddArg(x) 4843 return true 4844 } 4845 // match: (BTLconst [0] s:(SHRQ x y)) 4846 // cond: 4847 // result: (BTQ y x) 4848 for { 4849 if v.AuxInt != 0 { 4850 break 4851 } 4852 s := v.Args[0] 4853 if s.Op != OpAMD64SHRQ { 4854 break 4855 } 4856 _ = s.Args[1] 4857 x := s.Args[0] 4858 y := s.Args[1] 4859 v.reset(OpAMD64BTQ) 4860 v.AddArg(y) 4861 v.AddArg(x) 4862 return true 4863 } 4864 // match: (BTLconst [c] (SHRLconst [d] x)) 4865 // cond: (c+d)<32 4866 // result: (BTLconst [c+d] x) 4867 for { 4868 c := v.AuxInt 4869 v_0 := v.Args[0] 4870 if v_0.Op != OpAMD64SHRLconst { 4871 break 4872 } 4873 d := v_0.AuxInt 4874 x := v_0.Args[0] 4875 if !((c + d) < 32) { 4876 break 4877 } 4878 v.reset(OpAMD64BTLconst) 4879 v.AuxInt = c + d 4880 v.AddArg(x) 4881 return true 4882 } 4883 // match: (BTLconst [c] (SHLLconst [d] x)) 4884 // cond: c>d 4885 // result: (BTLconst [c-d] x) 4886 for { 4887 c := v.AuxInt 4888 v_0 := v.Args[0] 4889 if v_0.Op != OpAMD64SHLLconst { 4890 break 4891 } 4892 d := v_0.AuxInt 4893 x := v_0.Args[0] 4894 if !(c > d) { 4895 break 4896 } 4897 v.reset(OpAMD64BTLconst) 4898 v.AuxInt = c - d 4899 v.AddArg(x) 4900 return true 4901 } 4902 // match: (BTLconst [0] s:(SHRL x y)) 4903 // cond: 
4904 // result: (BTL y x) 4905 for { 4906 if v.AuxInt != 0 { 4907 break 4908 } 4909 s := v.Args[0] 4910 if s.Op != OpAMD64SHRL { 4911 break 4912 } 4913 _ = s.Args[1] 4914 x := s.Args[0] 4915 y := s.Args[1] 4916 v.reset(OpAMD64BTL) 4917 v.AddArg(y) 4918 v.AddArg(x) 4919 return true 4920 } 4921 return false 4922 } 4923 func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool { 4924 // match: (BTQconst [c] (SHRQconst [d] x)) 4925 // cond: (c+d)<64 4926 // result: (BTQconst [c+d] x) 4927 for { 4928 c := v.AuxInt 4929 v_0 := v.Args[0] 4930 if v_0.Op != OpAMD64SHRQconst { 4931 break 4932 } 4933 d := v_0.AuxInt 4934 x := v_0.Args[0] 4935 if !((c + d) < 64) { 4936 break 4937 } 4938 v.reset(OpAMD64BTQconst) 4939 v.AuxInt = c + d 4940 v.AddArg(x) 4941 return true 4942 } 4943 // match: (BTQconst [c] (SHLQconst [d] x)) 4944 // cond: c>d 4945 // result: (BTQconst [c-d] x) 4946 for { 4947 c := v.AuxInt 4948 v_0 := v.Args[0] 4949 if v_0.Op != OpAMD64SHLQconst { 4950 break 4951 } 4952 d := v_0.AuxInt 4953 x := v_0.Args[0] 4954 if !(c > d) { 4955 break 4956 } 4957 v.reset(OpAMD64BTQconst) 4958 v.AuxInt = c - d 4959 v.AddArg(x) 4960 return true 4961 } 4962 // match: (BTQconst [0] s:(SHRQ x y)) 4963 // cond: 4964 // result: (BTQ y x) 4965 for { 4966 if v.AuxInt != 0 { 4967 break 4968 } 4969 s := v.Args[0] 4970 if s.Op != OpAMD64SHRQ { 4971 break 4972 } 4973 _ = s.Args[1] 4974 x := s.Args[0] 4975 y := s.Args[1] 4976 v.reset(OpAMD64BTQ) 4977 v.AddArg(y) 4978 v.AddArg(x) 4979 return true 4980 } 4981 return false 4982 } 4983 func rewriteValueAMD64_OpAMD64BTRLconst_0(v *Value) bool { 4984 // match: (BTRLconst [c] (BTSLconst [c] x)) 4985 // cond: 4986 // result: (BTRLconst [c] x) 4987 for { 4988 c := v.AuxInt 4989 v_0 := v.Args[0] 4990 if v_0.Op != OpAMD64BTSLconst { 4991 break 4992 } 4993 if v_0.AuxInt != c { 4994 break 4995 } 4996 x := v_0.Args[0] 4997 v.reset(OpAMD64BTRLconst) 4998 v.AuxInt = c 4999 v.AddArg(x) 5000 return true 5001 } 5002 // match: (BTRLconst [c] (BTCLconst [c] x)) 5003 // cond: 5004 // result: (BTRLconst [c] x) 5005 for { 5006 c := v.AuxInt 5007 v_0 := v.Args[0] 5008 if v_0.Op != OpAMD64BTCLconst { 5009 break 5010 } 5011 if v_0.AuxInt != c { 5012 break 5013 } 5014 x := v_0.Args[0] 5015 v.reset(OpAMD64BTRLconst) 5016 v.AuxInt = c 5017 v.AddArg(x) 5018 return true 5019 } 5020 // match: (BTRLconst [c] (ANDLconst [d] x)) 5021 // cond: 5022 // result: (ANDLconst [d &^ (1<<uint32(c))] x) 5023 for { 5024 c := v.AuxInt 5025 v_0 := v.Args[0] 5026 if v_0.Op != OpAMD64ANDLconst { 5027 break 5028 } 5029 d := v_0.AuxInt 5030 x := v_0.Args[0] 5031 v.reset(OpAMD64ANDLconst) 5032 v.AuxInt = d &^ (1 << uint32(c)) 5033 v.AddArg(x) 5034 return true 5035 } 5036 // match: (BTRLconst [c] (BTRLconst [d] x)) 5037 // cond: 5038 // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x) 5039 for { 5040 c := v.AuxInt 5041 v_0 := v.Args[0] 5042 if v_0.Op != OpAMD64BTRLconst { 5043 break 5044 } 5045 d := v_0.AuxInt 5046 x := v_0.Args[0] 5047 v.reset(OpAMD64ANDLconst) 5048 v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d)) 5049 v.AddArg(x) 5050 return true 5051 } 5052 // match: (BTRLconst [c] (MOVLconst [d])) 5053 // cond: 5054 // result: (MOVLconst [d&^(1<<uint32(c))]) 5055 for { 5056 c := v.AuxInt 5057 v_0 := v.Args[0] 5058 if v_0.Op != OpAMD64MOVLconst { 5059 break 5060 } 5061 d := v_0.AuxInt 5062 v.reset(OpAMD64MOVLconst) 5063 v.AuxInt = d &^ (1 << uint32(c)) 5064 return true 5065 } 5066 return false 5067 } 5068 func rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v *Value) bool { 5069 // match: (BTRLconstmodify [valoff1] {sym} 
(ADDQconst [off2] base) mem) 5070 // cond: ValAndOff(valoff1).canAdd(off2) 5071 // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 5072 for { 5073 valoff1 := v.AuxInt 5074 sym := v.Aux 5075 _ = v.Args[1] 5076 v_0 := v.Args[0] 5077 if v_0.Op != OpAMD64ADDQconst { 5078 break 5079 } 5080 off2 := v_0.AuxInt 5081 base := v_0.Args[0] 5082 mem := v.Args[1] 5083 if !(ValAndOff(valoff1).canAdd(off2)) { 5084 break 5085 } 5086 v.reset(OpAMD64BTRLconstmodify) 5087 v.AuxInt = ValAndOff(valoff1).add(off2) 5088 v.Aux = sym 5089 v.AddArg(base) 5090 v.AddArg(mem) 5091 return true 5092 } 5093 // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 5094 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 5095 // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 5096 for { 5097 valoff1 := v.AuxInt 5098 sym1 := v.Aux 5099 _ = v.Args[1] 5100 v_0 := v.Args[0] 5101 if v_0.Op != OpAMD64LEAQ { 5102 break 5103 } 5104 off2 := v_0.AuxInt 5105 sym2 := v_0.Aux 5106 base := v_0.Args[0] 5107 mem := v.Args[1] 5108 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 5109 break 5110 } 5111 v.reset(OpAMD64BTRLconstmodify) 5112 v.AuxInt = ValAndOff(valoff1).add(off2) 5113 v.Aux = mergeSym(sym1, sym2) 5114 v.AddArg(base) 5115 v.AddArg(mem) 5116 return true 5117 } 5118 return false 5119 } 5120 func rewriteValueAMD64_OpAMD64BTRLmodify_0(v *Value) bool { 5121 // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem) 5122 // cond: is32Bit(off1+off2) 5123 // result: (BTRLmodify [off1+off2] {sym} base val mem) 5124 for { 5125 off1 := v.AuxInt 5126 sym := v.Aux 5127 _ = v.Args[2] 5128 v_0 := v.Args[0] 5129 if v_0.Op != OpAMD64ADDQconst { 5130 break 5131 } 5132 off2 := v_0.AuxInt 5133 base := v_0.Args[0] 5134 val := v.Args[1] 5135 mem := v.Args[2] 5136 if !(is32Bit(off1 + off2)) { 5137 break 5138 } 5139 v.reset(OpAMD64BTRLmodify) 5140 v.AuxInt = off1 + off2 5141 v.Aux = sym 5142 v.AddArg(base) 5143 v.AddArg(val) 5144 v.AddArg(mem) 5145 return true 5146 } 5147 // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5148 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5149 // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5150 for { 5151 off1 := v.AuxInt 5152 sym1 := v.Aux 5153 _ = v.Args[2] 5154 v_0 := v.Args[0] 5155 if v_0.Op != OpAMD64LEAQ { 5156 break 5157 } 5158 off2 := v_0.AuxInt 5159 sym2 := v_0.Aux 5160 base := v_0.Args[0] 5161 val := v.Args[1] 5162 mem := v.Args[2] 5163 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5164 break 5165 } 5166 v.reset(OpAMD64BTRLmodify) 5167 v.AuxInt = off1 + off2 5168 v.Aux = mergeSym(sym1, sym2) 5169 v.AddArg(base) 5170 v.AddArg(val) 5171 v.AddArg(mem) 5172 return true 5173 } 5174 return false 5175 } 5176 func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool { 5177 // match: (BTRQconst [c] (BTSQconst [c] x)) 5178 // cond: 5179 // result: (BTRQconst [c] x) 5180 for { 5181 c := v.AuxInt 5182 v_0 := v.Args[0] 5183 if v_0.Op != OpAMD64BTSQconst { 5184 break 5185 } 5186 if v_0.AuxInt != c { 5187 break 5188 } 5189 x := v_0.Args[0] 5190 v.reset(OpAMD64BTRQconst) 5191 v.AuxInt = c 5192 v.AddArg(x) 5193 return true 5194 } 5195 // match: (BTRQconst [c] (BTCQconst [c] x)) 5196 // cond: 5197 // result: (BTRQconst [c] x) 5198 for { 5199 c := v.AuxInt 5200 v_0 := v.Args[0] 5201 if v_0.Op != OpAMD64BTCQconst { 5202 break 5203 } 5204 if v_0.AuxInt != c { 5205 break 5206 } 5207 x := v_0.Args[0] 5208 v.reset(OpAMD64BTRQconst) 5209 v.AuxInt = c 
5210 v.AddArg(x) 5211 return true 5212 } 5213 // match: (BTRQconst [c] (ANDQconst [d] x)) 5214 // cond: 5215 // result: (ANDQconst [d &^ (1<<uint32(c))] x) 5216 for { 5217 c := v.AuxInt 5218 v_0 := v.Args[0] 5219 if v_0.Op != OpAMD64ANDQconst { 5220 break 5221 } 5222 d := v_0.AuxInt 5223 x := v_0.Args[0] 5224 v.reset(OpAMD64ANDQconst) 5225 v.AuxInt = d &^ (1 << uint32(c)) 5226 v.AddArg(x) 5227 return true 5228 } 5229 // match: (BTRQconst [c] (BTRQconst [d] x)) 5230 // cond: 5231 // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x) 5232 for { 5233 c := v.AuxInt 5234 v_0 := v.Args[0] 5235 if v_0.Op != OpAMD64BTRQconst { 5236 break 5237 } 5238 d := v_0.AuxInt 5239 x := v_0.Args[0] 5240 v.reset(OpAMD64ANDQconst) 5241 v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d)) 5242 v.AddArg(x) 5243 return true 5244 } 5245 // match: (BTRQconst [c] (MOVQconst [d])) 5246 // cond: 5247 // result: (MOVQconst [d&^(1<<uint32(c))]) 5248 for { 5249 c := v.AuxInt 5250 v_0 := v.Args[0] 5251 if v_0.Op != OpAMD64MOVQconst { 5252 break 5253 } 5254 d := v_0.AuxInt 5255 v.reset(OpAMD64MOVQconst) 5256 v.AuxInt = d &^ (1 << uint32(c)) 5257 return true 5258 } 5259 return false 5260 } 5261 func rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v *Value) bool { 5262 // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 5263 // cond: ValAndOff(valoff1).canAdd(off2) 5264 // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 5265 for { 5266 valoff1 := v.AuxInt 5267 sym := v.Aux 5268 _ = v.Args[1] 5269 v_0 := v.Args[0] 5270 if v_0.Op != OpAMD64ADDQconst { 5271 break 5272 } 5273 off2 := v_0.AuxInt 5274 base := v_0.Args[0] 5275 mem := v.Args[1] 5276 if !(ValAndOff(valoff1).canAdd(off2)) { 5277 break 5278 } 5279 v.reset(OpAMD64BTRQconstmodify) 5280 v.AuxInt = ValAndOff(valoff1).add(off2) 5281 v.Aux = sym 5282 v.AddArg(base) 5283 v.AddArg(mem) 5284 return true 5285 } 5286 // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 5287 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 5288 // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 5289 for { 5290 valoff1 := v.AuxInt 5291 sym1 := v.Aux 5292 _ = v.Args[1] 5293 v_0 := v.Args[0] 5294 if v_0.Op != OpAMD64LEAQ { 5295 break 5296 } 5297 off2 := v_0.AuxInt 5298 sym2 := v_0.Aux 5299 base := v_0.Args[0] 5300 mem := v.Args[1] 5301 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 5302 break 5303 } 5304 v.reset(OpAMD64BTRQconstmodify) 5305 v.AuxInt = ValAndOff(valoff1).add(off2) 5306 v.Aux = mergeSym(sym1, sym2) 5307 v.AddArg(base) 5308 v.AddArg(mem) 5309 return true 5310 } 5311 return false 5312 } 5313 func rewriteValueAMD64_OpAMD64BTRQmodify_0(v *Value) bool { 5314 // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 5315 // cond: is32Bit(off1+off2) 5316 // result: (BTRQmodify [off1+off2] {sym} base val mem) 5317 for { 5318 off1 := v.AuxInt 5319 sym := v.Aux 5320 _ = v.Args[2] 5321 v_0 := v.Args[0] 5322 if v_0.Op != OpAMD64ADDQconst { 5323 break 5324 } 5325 off2 := v_0.AuxInt 5326 base := v_0.Args[0] 5327 val := v.Args[1] 5328 mem := v.Args[2] 5329 if !(is32Bit(off1 + off2)) { 5330 break 5331 } 5332 v.reset(OpAMD64BTRQmodify) 5333 v.AuxInt = off1 + off2 5334 v.Aux = sym 5335 v.AddArg(base) 5336 v.AddArg(val) 5337 v.AddArg(mem) 5338 return true 5339 } 5340 // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5341 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5342 // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val 
mem) 5343 for { 5344 off1 := v.AuxInt 5345 sym1 := v.Aux 5346 _ = v.Args[2] 5347 v_0 := v.Args[0] 5348 if v_0.Op != OpAMD64LEAQ { 5349 break 5350 } 5351 off2 := v_0.AuxInt 5352 sym2 := v_0.Aux 5353 base := v_0.Args[0] 5354 val := v.Args[1] 5355 mem := v.Args[2] 5356 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5357 break 5358 } 5359 v.reset(OpAMD64BTRQmodify) 5360 v.AuxInt = off1 + off2 5361 v.Aux = mergeSym(sym1, sym2) 5362 v.AddArg(base) 5363 v.AddArg(val) 5364 v.AddArg(mem) 5365 return true 5366 } 5367 return false 5368 } 5369 func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool { 5370 // match: (BTSLconst [c] (BTRLconst [c] x)) 5371 // cond: 5372 // result: (BTSLconst [c] x) 5373 for { 5374 c := v.AuxInt 5375 v_0 := v.Args[0] 5376 if v_0.Op != OpAMD64BTRLconst { 5377 break 5378 } 5379 if v_0.AuxInt != c { 5380 break 5381 } 5382 x := v_0.Args[0] 5383 v.reset(OpAMD64BTSLconst) 5384 v.AuxInt = c 5385 v.AddArg(x) 5386 return true 5387 } 5388 // match: (BTSLconst [c] (BTCLconst [c] x)) 5389 // cond: 5390 // result: (BTSLconst [c] x) 5391 for { 5392 c := v.AuxInt 5393 v_0 := v.Args[0] 5394 if v_0.Op != OpAMD64BTCLconst { 5395 break 5396 } 5397 if v_0.AuxInt != c { 5398 break 5399 } 5400 x := v_0.Args[0] 5401 v.reset(OpAMD64BTSLconst) 5402 v.AuxInt = c 5403 v.AddArg(x) 5404 return true 5405 } 5406 // match: (BTSLconst [c] (ORLconst [d] x)) 5407 // cond: 5408 // result: (ORLconst [d | 1<<uint32(c)] x) 5409 for { 5410 c := v.AuxInt 5411 v_0 := v.Args[0] 5412 if v_0.Op != OpAMD64ORLconst { 5413 break 5414 } 5415 d := v_0.AuxInt 5416 x := v_0.Args[0] 5417 v.reset(OpAMD64ORLconst) 5418 v.AuxInt = d | 1<<uint32(c) 5419 v.AddArg(x) 5420 return true 5421 } 5422 // match: (BTSLconst [c] (BTSLconst [d] x)) 5423 // cond: 5424 // result: (ORLconst [1<<uint32(d) | 1<<uint32(c)] x) 5425 for { 5426 c := v.AuxInt 5427 v_0 := v.Args[0] 5428 if v_0.Op != OpAMD64BTSLconst { 5429 break 5430 } 5431 d := v_0.AuxInt 5432 x := v_0.Args[0] 5433 v.reset(OpAMD64ORLconst) 5434 v.AuxInt = 1<<uint32(d) | 1<<uint32(c) 5435 v.AddArg(x) 5436 return true 5437 } 5438 // match: (BTSLconst [c] (MOVLconst [d])) 5439 // cond: 5440 // result: (MOVLconst [d|(1<<uint32(c))]) 5441 for { 5442 c := v.AuxInt 5443 v_0 := v.Args[0] 5444 if v_0.Op != OpAMD64MOVLconst { 5445 break 5446 } 5447 d := v_0.AuxInt 5448 v.reset(OpAMD64MOVLconst) 5449 v.AuxInt = d | (1 << uint32(c)) 5450 return true 5451 } 5452 return false 5453 } 5454 func rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v *Value) bool { 5455 // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 5456 // cond: ValAndOff(valoff1).canAdd(off2) 5457 // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 5458 for { 5459 valoff1 := v.AuxInt 5460 sym := v.Aux 5461 _ = v.Args[1] 5462 v_0 := v.Args[0] 5463 if v_0.Op != OpAMD64ADDQconst { 5464 break 5465 } 5466 off2 := v_0.AuxInt 5467 base := v_0.Args[0] 5468 mem := v.Args[1] 5469 if !(ValAndOff(valoff1).canAdd(off2)) { 5470 break 5471 } 5472 v.reset(OpAMD64BTSLconstmodify) 5473 v.AuxInt = ValAndOff(valoff1).add(off2) 5474 v.Aux = sym 5475 v.AddArg(base) 5476 v.AddArg(mem) 5477 return true 5478 } 5479 // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 5480 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 5481 // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 5482 for { 5483 valoff1 := v.AuxInt 5484 sym1 := v.Aux 5485 _ = v.Args[1] 5486 v_0 := v.Args[0] 5487 if v_0.Op != OpAMD64LEAQ { 5488 break 5489 } 5490 off2 := 
v_0.AuxInt 5491 sym2 := v_0.Aux 5492 base := v_0.Args[0] 5493 mem := v.Args[1] 5494 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 5495 break 5496 } 5497 v.reset(OpAMD64BTSLconstmodify) 5498 v.AuxInt = ValAndOff(valoff1).add(off2) 5499 v.Aux = mergeSym(sym1, sym2) 5500 v.AddArg(base) 5501 v.AddArg(mem) 5502 return true 5503 } 5504 return false 5505 } 5506 func rewriteValueAMD64_OpAMD64BTSLmodify_0(v *Value) bool { 5507 // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem) 5508 // cond: is32Bit(off1+off2) 5509 // result: (BTSLmodify [off1+off2] {sym} base val mem) 5510 for { 5511 off1 := v.AuxInt 5512 sym := v.Aux 5513 _ = v.Args[2] 5514 v_0 := v.Args[0] 5515 if v_0.Op != OpAMD64ADDQconst { 5516 break 5517 } 5518 off2 := v_0.AuxInt 5519 base := v_0.Args[0] 5520 val := v.Args[1] 5521 mem := v.Args[2] 5522 if !(is32Bit(off1 + off2)) { 5523 break 5524 } 5525 v.reset(OpAMD64BTSLmodify) 5526 v.AuxInt = off1 + off2 5527 v.Aux = sym 5528 v.AddArg(base) 5529 v.AddArg(val) 5530 v.AddArg(mem) 5531 return true 5532 } 5533 // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5534 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5535 // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5536 for { 5537 off1 := v.AuxInt 5538 sym1 := v.Aux 5539 _ = v.Args[2] 5540 v_0 := v.Args[0] 5541 if v_0.Op != OpAMD64LEAQ { 5542 break 5543 } 5544 off2 := v_0.AuxInt 5545 sym2 := v_0.Aux 5546 base := v_0.Args[0] 5547 val := v.Args[1] 5548 mem := v.Args[2] 5549 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5550 break 5551 } 5552 v.reset(OpAMD64BTSLmodify) 5553 v.AuxInt = off1 + off2 5554 v.Aux = mergeSym(sym1, sym2) 5555 v.AddArg(base) 5556 v.AddArg(val) 5557 v.AddArg(mem) 5558 return true 5559 } 5560 return false 5561 } 5562 func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool { 5563 // match: (BTSQconst [c] (BTRQconst [c] x)) 5564 // cond: 5565 // result: (BTSQconst [c] x) 5566 for { 5567 c := v.AuxInt 5568 v_0 := v.Args[0] 5569 if v_0.Op != OpAMD64BTRQconst { 5570 break 5571 } 5572 if v_0.AuxInt != c { 5573 break 5574 } 5575 x := v_0.Args[0] 5576 v.reset(OpAMD64BTSQconst) 5577 v.AuxInt = c 5578 v.AddArg(x) 5579 return true 5580 } 5581 // match: (BTSQconst [c] (BTCQconst [c] x)) 5582 // cond: 5583 // result: (BTSQconst [c] x) 5584 for { 5585 c := v.AuxInt 5586 v_0 := v.Args[0] 5587 if v_0.Op != OpAMD64BTCQconst { 5588 break 5589 } 5590 if v_0.AuxInt != c { 5591 break 5592 } 5593 x := v_0.Args[0] 5594 v.reset(OpAMD64BTSQconst) 5595 v.AuxInt = c 5596 v.AddArg(x) 5597 return true 5598 } 5599 // match: (BTSQconst [c] (ORQconst [d] x)) 5600 // cond: 5601 // result: (ORQconst [d | 1<<uint32(c)] x) 5602 for { 5603 c := v.AuxInt 5604 v_0 := v.Args[0] 5605 if v_0.Op != OpAMD64ORQconst { 5606 break 5607 } 5608 d := v_0.AuxInt 5609 x := v_0.Args[0] 5610 v.reset(OpAMD64ORQconst) 5611 v.AuxInt = d | 1<<uint32(c) 5612 v.AddArg(x) 5613 return true 5614 } 5615 // match: (BTSQconst [c] (BTSQconst [d] x)) 5616 // cond: 5617 // result: (ORQconst [1<<uint32(d) | 1<<uint32(c)] x) 5618 for { 5619 c := v.AuxInt 5620 v_0 := v.Args[0] 5621 if v_0.Op != OpAMD64BTSQconst { 5622 break 5623 } 5624 d := v_0.AuxInt 5625 x := v_0.Args[0] 5626 v.reset(OpAMD64ORQconst) 5627 v.AuxInt = 1<<uint32(d) | 1<<uint32(c) 5628 v.AddArg(x) 5629 return true 5630 } 5631 // match: (BTSQconst [c] (MOVQconst [d])) 5632 // cond: 5633 // result: (MOVQconst [d|(1<<uint32(c))]) 5634 for { 5635 c := v.AuxInt 5636 v_0 := v.Args[0] 5637 if v_0.Op != OpAMD64MOVQconst { 5638 break 5639 } 
5640 d := v_0.AuxInt 5641 v.reset(OpAMD64MOVQconst) 5642 v.AuxInt = d | (1 << uint32(c)) 5643 return true 5644 } 5645 return false 5646 } 5647 func rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v *Value) bool { 5648 // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 5649 // cond: ValAndOff(valoff1).canAdd(off2) 5650 // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 5651 for { 5652 valoff1 := v.AuxInt 5653 sym := v.Aux 5654 _ = v.Args[1] 5655 v_0 := v.Args[0] 5656 if v_0.Op != OpAMD64ADDQconst { 5657 break 5658 } 5659 off2 := v_0.AuxInt 5660 base := v_0.Args[0] 5661 mem := v.Args[1] 5662 if !(ValAndOff(valoff1).canAdd(off2)) { 5663 break 5664 } 5665 v.reset(OpAMD64BTSQconstmodify) 5666 v.AuxInt = ValAndOff(valoff1).add(off2) 5667 v.Aux = sym 5668 v.AddArg(base) 5669 v.AddArg(mem) 5670 return true 5671 } 5672 // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 5673 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 5674 // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 5675 for { 5676 valoff1 := v.AuxInt 5677 sym1 := v.Aux 5678 _ = v.Args[1] 5679 v_0 := v.Args[0] 5680 if v_0.Op != OpAMD64LEAQ { 5681 break 5682 } 5683 off2 := v_0.AuxInt 5684 sym2 := v_0.Aux 5685 base := v_0.Args[0] 5686 mem := v.Args[1] 5687 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 5688 break 5689 } 5690 v.reset(OpAMD64BTSQconstmodify) 5691 v.AuxInt = ValAndOff(valoff1).add(off2) 5692 v.Aux = mergeSym(sym1, sym2) 5693 v.AddArg(base) 5694 v.AddArg(mem) 5695 return true 5696 } 5697 return false 5698 } 5699 func rewriteValueAMD64_OpAMD64BTSQmodify_0(v *Value) bool { 5700 // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 5701 // cond: is32Bit(off1+off2) 5702 // result: (BTSQmodify [off1+off2] {sym} base val mem) 5703 for { 5704 off1 := v.AuxInt 5705 sym := v.Aux 5706 _ = v.Args[2] 5707 v_0 := v.Args[0] 5708 if v_0.Op != OpAMD64ADDQconst { 5709 break 5710 } 5711 off2 := v_0.AuxInt 5712 base := v_0.Args[0] 5713 val := v.Args[1] 5714 mem := v.Args[2] 5715 if !(is32Bit(off1 + off2)) { 5716 break 5717 } 5718 v.reset(OpAMD64BTSQmodify) 5719 v.AuxInt = off1 + off2 5720 v.Aux = sym 5721 v.AddArg(base) 5722 v.AddArg(val) 5723 v.AddArg(mem) 5724 return true 5725 } 5726 // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5727 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5728 // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5729 for { 5730 off1 := v.AuxInt 5731 sym1 := v.Aux 5732 _ = v.Args[2] 5733 v_0 := v.Args[0] 5734 if v_0.Op != OpAMD64LEAQ { 5735 break 5736 } 5737 off2 := v_0.AuxInt 5738 sym2 := v_0.Aux 5739 base := v_0.Args[0] 5740 val := v.Args[1] 5741 mem := v.Args[2] 5742 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5743 break 5744 } 5745 v.reset(OpAMD64BTSQmodify) 5746 v.AuxInt = off1 + off2 5747 v.Aux = mergeSym(sym1, sym2) 5748 v.AddArg(base) 5749 v.AddArg(val) 5750 v.AddArg(mem) 5751 return true 5752 } 5753 return false 5754 } 5755 func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool { 5756 // match: (CMOVLCC x y (InvertFlags cond)) 5757 // cond: 5758 // result: (CMOVLLS x y cond) 5759 for { 5760 _ = v.Args[2] 5761 x := v.Args[0] 5762 y := v.Args[1] 5763 v_2 := v.Args[2] 5764 if v_2.Op != OpAMD64InvertFlags { 5765 break 5766 } 5767 cond := v_2.Args[0] 5768 v.reset(OpAMD64CMOVLLS) 5769 v.AddArg(x) 5770 v.AddArg(y) 5771 v.AddArg(cond) 5772 return true 5773 } 5774 // match: (CMOVLCC _ x (FlagEQ)) 5775 // 
cond: 5776 // result: x 5777 for { 5778 _ = v.Args[2] 5779 x := v.Args[1] 5780 v_2 := v.Args[2] 5781 if v_2.Op != OpAMD64FlagEQ { 5782 break 5783 } 5784 v.reset(OpCopy) 5785 v.Type = x.Type 5786 v.AddArg(x) 5787 return true 5788 } 5789 // match: (CMOVLCC _ x (FlagGT_UGT)) 5790 // cond: 5791 // result: x 5792 for { 5793 _ = v.Args[2] 5794 x := v.Args[1] 5795 v_2 := v.Args[2] 5796 if v_2.Op != OpAMD64FlagGT_UGT { 5797 break 5798 } 5799 v.reset(OpCopy) 5800 v.Type = x.Type 5801 v.AddArg(x) 5802 return true 5803 } 5804 // match: (CMOVLCC y _ (FlagGT_ULT)) 5805 // cond: 5806 // result: y 5807 for { 5808 _ = v.Args[2] 5809 y := v.Args[0] 5810 v_2 := v.Args[2] 5811 if v_2.Op != OpAMD64FlagGT_ULT { 5812 break 5813 } 5814 v.reset(OpCopy) 5815 v.Type = y.Type 5816 v.AddArg(y) 5817 return true 5818 } 5819 // match: (CMOVLCC y _ (FlagLT_ULT)) 5820 // cond: 5821 // result: y 5822 for { 5823 _ = v.Args[2] 5824 y := v.Args[0] 5825 v_2 := v.Args[2] 5826 if v_2.Op != OpAMD64FlagLT_ULT { 5827 break 5828 } 5829 v.reset(OpCopy) 5830 v.Type = y.Type 5831 v.AddArg(y) 5832 return true 5833 } 5834 // match: (CMOVLCC _ x (FlagLT_UGT)) 5835 // cond: 5836 // result: x 5837 for { 5838 _ = v.Args[2] 5839 x := v.Args[1] 5840 v_2 := v.Args[2] 5841 if v_2.Op != OpAMD64FlagLT_UGT { 5842 break 5843 } 5844 v.reset(OpCopy) 5845 v.Type = x.Type 5846 v.AddArg(x) 5847 return true 5848 } 5849 return false 5850 } 5851 func rewriteValueAMD64_OpAMD64CMOVLCS_0(v *Value) bool { 5852 // match: (CMOVLCS x y (InvertFlags cond)) 5853 // cond: 5854 // result: (CMOVLHI x y cond) 5855 for { 5856 _ = v.Args[2] 5857 x := v.Args[0] 5858 y := v.Args[1] 5859 v_2 := v.Args[2] 5860 if v_2.Op != OpAMD64InvertFlags { 5861 break 5862 } 5863 cond := v_2.Args[0] 5864 v.reset(OpAMD64CMOVLHI) 5865 v.AddArg(x) 5866 v.AddArg(y) 5867 v.AddArg(cond) 5868 return true 5869 } 5870 // match: (CMOVLCS y _ (FlagEQ)) 5871 // cond: 5872 // result: y 5873 for { 5874 _ = v.Args[2] 5875 y := v.Args[0] 5876 v_2 := v.Args[2] 5877 if v_2.Op != OpAMD64FlagEQ { 5878 break 5879 } 5880 v.reset(OpCopy) 5881 v.Type = y.Type 5882 v.AddArg(y) 5883 return true 5884 } 5885 // match: (CMOVLCS y _ (FlagGT_UGT)) 5886 // cond: 5887 // result: y 5888 for { 5889 _ = v.Args[2] 5890 y := v.Args[0] 5891 v_2 := v.Args[2] 5892 if v_2.Op != OpAMD64FlagGT_UGT { 5893 break 5894 } 5895 v.reset(OpCopy) 5896 v.Type = y.Type 5897 v.AddArg(y) 5898 return true 5899 } 5900 // match: (CMOVLCS _ x (FlagGT_ULT)) 5901 // cond: 5902 // result: x 5903 for { 5904 _ = v.Args[2] 5905 x := v.Args[1] 5906 v_2 := v.Args[2] 5907 if v_2.Op != OpAMD64FlagGT_ULT { 5908 break 5909 } 5910 v.reset(OpCopy) 5911 v.Type = x.Type 5912 v.AddArg(x) 5913 return true 5914 } 5915 // match: (CMOVLCS _ x (FlagLT_ULT)) 5916 // cond: 5917 // result: x 5918 for { 5919 _ = v.Args[2] 5920 x := v.Args[1] 5921 v_2 := v.Args[2] 5922 if v_2.Op != OpAMD64FlagLT_ULT { 5923 break 5924 } 5925 v.reset(OpCopy) 5926 v.Type = x.Type 5927 v.AddArg(x) 5928 return true 5929 } 5930 // match: (CMOVLCS y _ (FlagLT_UGT)) 5931 // cond: 5932 // result: y 5933 for { 5934 _ = v.Args[2] 5935 y := v.Args[0] 5936 v_2 := v.Args[2] 5937 if v_2.Op != OpAMD64FlagLT_UGT { 5938 break 5939 } 5940 v.reset(OpCopy) 5941 v.Type = y.Type 5942 v.AddArg(y) 5943 return true 5944 } 5945 return false 5946 } 5947 func rewriteValueAMD64_OpAMD64CMOVLEQ_0(v *Value) bool { 5948 // match: (CMOVLEQ x y (InvertFlags cond)) 5949 // cond: 5950 // result: (CMOVLEQ x y cond) 5951 for { 5952 _ = v.Args[2] 5953 x := v.Args[0] 5954 y := v.Args[1] 5955 v_2 := v.Args[2] 5956 if v_2.Op != 
OpAMD64InvertFlags { 5957 break 5958 } 5959 cond := v_2.Args[0] 5960 v.reset(OpAMD64CMOVLEQ) 5961 v.AddArg(x) 5962 v.AddArg(y) 5963 v.AddArg(cond) 5964 return true 5965 } 5966 // match: (CMOVLEQ _ x (FlagEQ)) 5967 // cond: 5968 // result: x 5969 for { 5970 _ = v.Args[2] 5971 x := v.Args[1] 5972 v_2 := v.Args[2] 5973 if v_2.Op != OpAMD64FlagEQ { 5974 break 5975 } 5976 v.reset(OpCopy) 5977 v.Type = x.Type 5978 v.AddArg(x) 5979 return true 5980 } 5981 // match: (CMOVLEQ y _ (FlagGT_UGT)) 5982 // cond: 5983 // result: y 5984 for { 5985 _ = v.Args[2] 5986 y := v.Args[0] 5987 v_2 := v.Args[2] 5988 if v_2.Op != OpAMD64FlagGT_UGT { 5989 break 5990 } 5991 v.reset(OpCopy) 5992 v.Type = y.Type 5993 v.AddArg(y) 5994 return true 5995 } 5996 // match: (CMOVLEQ y _ (FlagGT_ULT)) 5997 // cond: 5998 // result: y 5999 for { 6000 _ = v.Args[2] 6001 y := v.Args[0] 6002 v_2 := v.Args[2] 6003 if v_2.Op != OpAMD64FlagGT_ULT { 6004 break 6005 } 6006 v.reset(OpCopy) 6007 v.Type = y.Type 6008 v.AddArg(y) 6009 return true 6010 } 6011 // match: (CMOVLEQ y _ (FlagLT_ULT)) 6012 // cond: 6013 // result: y 6014 for { 6015 _ = v.Args[2] 6016 y := v.Args[0] 6017 v_2 := v.Args[2] 6018 if v_2.Op != OpAMD64FlagLT_ULT { 6019 break 6020 } 6021 v.reset(OpCopy) 6022 v.Type = y.Type 6023 v.AddArg(y) 6024 return true 6025 } 6026 // match: (CMOVLEQ y _ (FlagLT_UGT)) 6027 // cond: 6028 // result: y 6029 for { 6030 _ = v.Args[2] 6031 y := v.Args[0] 6032 v_2 := v.Args[2] 6033 if v_2.Op != OpAMD64FlagLT_UGT { 6034 break 6035 } 6036 v.reset(OpCopy) 6037 v.Type = y.Type 6038 v.AddArg(y) 6039 return true 6040 } 6041 return false 6042 } 6043 func rewriteValueAMD64_OpAMD64CMOVLGE_0(v *Value) bool { 6044 // match: (CMOVLGE x y (InvertFlags cond)) 6045 // cond: 6046 // result: (CMOVLLE x y cond) 6047 for { 6048 _ = v.Args[2] 6049 x := v.Args[0] 6050 y := v.Args[1] 6051 v_2 := v.Args[2] 6052 if v_2.Op != OpAMD64InvertFlags { 6053 break 6054 } 6055 cond := v_2.Args[0] 6056 v.reset(OpAMD64CMOVLLE) 6057 v.AddArg(x) 6058 v.AddArg(y) 6059 v.AddArg(cond) 6060 return true 6061 } 6062 // match: (CMOVLGE _ x (FlagEQ)) 6063 // cond: 6064 // result: x 6065 for { 6066 _ = v.Args[2] 6067 x := v.Args[1] 6068 v_2 := v.Args[2] 6069 if v_2.Op != OpAMD64FlagEQ { 6070 break 6071 } 6072 v.reset(OpCopy) 6073 v.Type = x.Type 6074 v.AddArg(x) 6075 return true 6076 } 6077 // match: (CMOVLGE _ x (FlagGT_UGT)) 6078 // cond: 6079 // result: x 6080 for { 6081 _ = v.Args[2] 6082 x := v.Args[1] 6083 v_2 := v.Args[2] 6084 if v_2.Op != OpAMD64FlagGT_UGT { 6085 break 6086 } 6087 v.reset(OpCopy) 6088 v.Type = x.Type 6089 v.AddArg(x) 6090 return true 6091 } 6092 // match: (CMOVLGE _ x (FlagGT_ULT)) 6093 // cond: 6094 // result: x 6095 for { 6096 _ = v.Args[2] 6097 x := v.Args[1] 6098 v_2 := v.Args[2] 6099 if v_2.Op != OpAMD64FlagGT_ULT { 6100 break 6101 } 6102 v.reset(OpCopy) 6103 v.Type = x.Type 6104 v.AddArg(x) 6105 return true 6106 } 6107 // match: (CMOVLGE y _ (FlagLT_ULT)) 6108 // cond: 6109 // result: y 6110 for { 6111 _ = v.Args[2] 6112 y := v.Args[0] 6113 v_2 := v.Args[2] 6114 if v_2.Op != OpAMD64FlagLT_ULT { 6115 break 6116 } 6117 v.reset(OpCopy) 6118 v.Type = y.Type 6119 v.AddArg(y) 6120 return true 6121 } 6122 // match: (CMOVLGE y _ (FlagLT_UGT)) 6123 // cond: 6124 // result: y 6125 for { 6126 _ = v.Args[2] 6127 y := v.Args[0] 6128 v_2 := v.Args[2] 6129 if v_2.Op != OpAMD64FlagLT_UGT { 6130 break 6131 } 6132 v.reset(OpCopy) 6133 v.Type = y.Type 6134 v.AddArg(y) 6135 return true 6136 } 6137 return false 6138 } 6139 func rewriteValueAMD64_OpAMD64CMOVLGT_0(v *Value) bool 
{ 6140 // match: (CMOVLGT x y (InvertFlags cond)) 6141 // cond: 6142 // result: (CMOVLLT x y cond) 6143 for { 6144 _ = v.Args[2] 6145 x := v.Args[0] 6146 y := v.Args[1] 6147 v_2 := v.Args[2] 6148 if v_2.Op != OpAMD64InvertFlags { 6149 break 6150 } 6151 cond := v_2.Args[0] 6152 v.reset(OpAMD64CMOVLLT) 6153 v.AddArg(x) 6154 v.AddArg(y) 6155 v.AddArg(cond) 6156 return true 6157 } 6158 // match: (CMOVLGT y _ (FlagEQ)) 6159 // cond: 6160 // result: y 6161 for { 6162 _ = v.Args[2] 6163 y := v.Args[0] 6164 v_2 := v.Args[2] 6165 if v_2.Op != OpAMD64FlagEQ { 6166 break 6167 } 6168 v.reset(OpCopy) 6169 v.Type = y.Type 6170 v.AddArg(y) 6171 return true 6172 } 6173 // match: (CMOVLGT _ x (FlagGT_UGT)) 6174 // cond: 6175 // result: x 6176 for { 6177 _ = v.Args[2] 6178 x := v.Args[1] 6179 v_2 := v.Args[2] 6180 if v_2.Op != OpAMD64FlagGT_UGT { 6181 break 6182 } 6183 v.reset(OpCopy) 6184 v.Type = x.Type 6185 v.AddArg(x) 6186 return true 6187 } 6188 // match: (CMOVLGT _ x (FlagGT_ULT)) 6189 // cond: 6190 // result: x 6191 for { 6192 _ = v.Args[2] 6193 x := v.Args[1] 6194 v_2 := v.Args[2] 6195 if v_2.Op != OpAMD64FlagGT_ULT { 6196 break 6197 } 6198 v.reset(OpCopy) 6199 v.Type = x.Type 6200 v.AddArg(x) 6201 return true 6202 } 6203 // match: (CMOVLGT y _ (FlagLT_ULT)) 6204 // cond: 6205 // result: y 6206 for { 6207 _ = v.Args[2] 6208 y := v.Args[0] 6209 v_2 := v.Args[2] 6210 if v_2.Op != OpAMD64FlagLT_ULT { 6211 break 6212 } 6213 v.reset(OpCopy) 6214 v.Type = y.Type 6215 v.AddArg(y) 6216 return true 6217 } 6218 // match: (CMOVLGT y _ (FlagLT_UGT)) 6219 // cond: 6220 // result: y 6221 for { 6222 _ = v.Args[2] 6223 y := v.Args[0] 6224 v_2 := v.Args[2] 6225 if v_2.Op != OpAMD64FlagLT_UGT { 6226 break 6227 } 6228 v.reset(OpCopy) 6229 v.Type = y.Type 6230 v.AddArg(y) 6231 return true 6232 } 6233 return false 6234 } 6235 func rewriteValueAMD64_OpAMD64CMOVLHI_0(v *Value) bool { 6236 // match: (CMOVLHI x y (InvertFlags cond)) 6237 // cond: 6238 // result: (CMOVLCS x y cond) 6239 for { 6240 _ = v.Args[2] 6241 x := v.Args[0] 6242 y := v.Args[1] 6243 v_2 := v.Args[2] 6244 if v_2.Op != OpAMD64InvertFlags { 6245 break 6246 } 6247 cond := v_2.Args[0] 6248 v.reset(OpAMD64CMOVLCS) 6249 v.AddArg(x) 6250 v.AddArg(y) 6251 v.AddArg(cond) 6252 return true 6253 } 6254 // match: (CMOVLHI y _ (FlagEQ)) 6255 // cond: 6256 // result: y 6257 for { 6258 _ = v.Args[2] 6259 y := v.Args[0] 6260 v_2 := v.Args[2] 6261 if v_2.Op != OpAMD64FlagEQ { 6262 break 6263 } 6264 v.reset(OpCopy) 6265 v.Type = y.Type 6266 v.AddArg(y) 6267 return true 6268 } 6269 // match: (CMOVLHI _ x (FlagGT_UGT)) 6270 // cond: 6271 // result: x 6272 for { 6273 _ = v.Args[2] 6274 x := v.Args[1] 6275 v_2 := v.Args[2] 6276 if v_2.Op != OpAMD64FlagGT_UGT { 6277 break 6278 } 6279 v.reset(OpCopy) 6280 v.Type = x.Type 6281 v.AddArg(x) 6282 return true 6283 } 6284 // match: (CMOVLHI y _ (FlagGT_ULT)) 6285 // cond: 6286 // result: y 6287 for { 6288 _ = v.Args[2] 6289 y := v.Args[0] 6290 v_2 := v.Args[2] 6291 if v_2.Op != OpAMD64FlagGT_ULT { 6292 break 6293 } 6294 v.reset(OpCopy) 6295 v.Type = y.Type 6296 v.AddArg(y) 6297 return true 6298 } 6299 // match: (CMOVLHI y _ (FlagLT_ULT)) 6300 // cond: 6301 // result: y 6302 for { 6303 _ = v.Args[2] 6304 y := v.Args[0] 6305 v_2 := v.Args[2] 6306 if v_2.Op != OpAMD64FlagLT_ULT { 6307 break 6308 } 6309 v.reset(OpCopy) 6310 v.Type = y.Type 6311 v.AddArg(y) 6312 return true 6313 } 6314 // match: (CMOVLHI _ x (FlagLT_UGT)) 6315 // cond: 6316 // result: x 6317 for { 6318 _ = v.Args[2] 6319 x := v.Args[1] 6320 v_2 := v.Args[2] 6321 if v_2.Op 
!= OpAMD64FlagLT_UGT { 6322 break 6323 } 6324 v.reset(OpCopy) 6325 v.Type = x.Type 6326 v.AddArg(x) 6327 return true 6328 } 6329 return false 6330 } 6331 func rewriteValueAMD64_OpAMD64CMOVLLE_0(v *Value) bool { 6332 // match: (CMOVLLE x y (InvertFlags cond)) 6333 // cond: 6334 // result: (CMOVLGE x y cond) 6335 for { 6336 _ = v.Args[2] 6337 x := v.Args[0] 6338 y := v.Args[1] 6339 v_2 := v.Args[2] 6340 if v_2.Op != OpAMD64InvertFlags { 6341 break 6342 } 6343 cond := v_2.Args[0] 6344 v.reset(OpAMD64CMOVLGE) 6345 v.AddArg(x) 6346 v.AddArg(y) 6347 v.AddArg(cond) 6348 return true 6349 } 6350 // match: (CMOVLLE _ x (FlagEQ)) 6351 // cond: 6352 // result: x 6353 for { 6354 _ = v.Args[2] 6355 x := v.Args[1] 6356 v_2 := v.Args[2] 6357 if v_2.Op != OpAMD64FlagEQ { 6358 break 6359 } 6360 v.reset(OpCopy) 6361 v.Type = x.Type 6362 v.AddArg(x) 6363 return true 6364 } 6365 // match: (CMOVLLE y _ (FlagGT_UGT)) 6366 // cond: 6367 // result: y 6368 for { 6369 _ = v.Args[2] 6370 y := v.Args[0] 6371 v_2 := v.Args[2] 6372 if v_2.Op != OpAMD64FlagGT_UGT { 6373 break 6374 } 6375 v.reset(OpCopy) 6376 v.Type = y.Type 6377 v.AddArg(y) 6378 return true 6379 } 6380 // match: (CMOVLLE y _ (FlagGT_ULT)) 6381 // cond: 6382 // result: y 6383 for { 6384 _ = v.Args[2] 6385 y := v.Args[0] 6386 v_2 := v.Args[2] 6387 if v_2.Op != OpAMD64FlagGT_ULT { 6388 break 6389 } 6390 v.reset(OpCopy) 6391 v.Type = y.Type 6392 v.AddArg(y) 6393 return true 6394 } 6395 // match: (CMOVLLE _ x (FlagLT_ULT)) 6396 // cond: 6397 // result: x 6398 for { 6399 _ = v.Args[2] 6400 x := v.Args[1] 6401 v_2 := v.Args[2] 6402 if v_2.Op != OpAMD64FlagLT_ULT { 6403 break 6404 } 6405 v.reset(OpCopy) 6406 v.Type = x.Type 6407 v.AddArg(x) 6408 return true 6409 } 6410 // match: (CMOVLLE _ x (FlagLT_UGT)) 6411 // cond: 6412 // result: x 6413 for { 6414 _ = v.Args[2] 6415 x := v.Args[1] 6416 v_2 := v.Args[2] 6417 if v_2.Op != OpAMD64FlagLT_UGT { 6418 break 6419 } 6420 v.reset(OpCopy) 6421 v.Type = x.Type 6422 v.AddArg(x) 6423 return true 6424 } 6425 return false 6426 } 6427 func rewriteValueAMD64_OpAMD64CMOVLLS_0(v *Value) bool { 6428 // match: (CMOVLLS x y (InvertFlags cond)) 6429 // cond: 6430 // result: (CMOVLCC x y cond) 6431 for { 6432 _ = v.Args[2] 6433 x := v.Args[0] 6434 y := v.Args[1] 6435 v_2 := v.Args[2] 6436 if v_2.Op != OpAMD64InvertFlags { 6437 break 6438 } 6439 cond := v_2.Args[0] 6440 v.reset(OpAMD64CMOVLCC) 6441 v.AddArg(x) 6442 v.AddArg(y) 6443 v.AddArg(cond) 6444 return true 6445 } 6446 // match: (CMOVLLS _ x (FlagEQ)) 6447 // cond: 6448 // result: x 6449 for { 6450 _ = v.Args[2] 6451 x := v.Args[1] 6452 v_2 := v.Args[2] 6453 if v_2.Op != OpAMD64FlagEQ { 6454 break 6455 } 6456 v.reset(OpCopy) 6457 v.Type = x.Type 6458 v.AddArg(x) 6459 return true 6460 } 6461 // match: (CMOVLLS y _ (FlagGT_UGT)) 6462 // cond: 6463 // result: y 6464 for { 6465 _ = v.Args[2] 6466 y := v.Args[0] 6467 v_2 := v.Args[2] 6468 if v_2.Op != OpAMD64FlagGT_UGT { 6469 break 6470 } 6471 v.reset(OpCopy) 6472 v.Type = y.Type 6473 v.AddArg(y) 6474 return true 6475 } 6476 // match: (CMOVLLS _ x (FlagGT_ULT)) 6477 // cond: 6478 // result: x 6479 for { 6480 _ = v.Args[2] 6481 x := v.Args[1] 6482 v_2 := v.Args[2] 6483 if v_2.Op != OpAMD64FlagGT_ULT { 6484 break 6485 } 6486 v.reset(OpCopy) 6487 v.Type = x.Type 6488 v.AddArg(x) 6489 return true 6490 } 6491 // match: (CMOVLLS _ x (FlagLT_ULT)) 6492 // cond: 6493 // result: x 6494 for { 6495 _ = v.Args[2] 6496 x := v.Args[1] 6497 v_2 := v.Args[2] 6498 if v_2.Op != OpAMD64FlagLT_ULT { 6499 break 6500 } 6501 v.reset(OpCopy) 6502 v.Type = 
x.Type 6503 v.AddArg(x) 6504 return true 6505 } 6506 // match: (CMOVLLS y _ (FlagLT_UGT)) 6507 // cond: 6508 // result: y 6509 for { 6510 _ = v.Args[2] 6511 y := v.Args[0] 6512 v_2 := v.Args[2] 6513 if v_2.Op != OpAMD64FlagLT_UGT { 6514 break 6515 } 6516 v.reset(OpCopy) 6517 v.Type = y.Type 6518 v.AddArg(y) 6519 return true 6520 } 6521 return false 6522 } 6523 func rewriteValueAMD64_OpAMD64CMOVLLT_0(v *Value) bool { 6524 // match: (CMOVLLT x y (InvertFlags cond)) 6525 // cond: 6526 // result: (CMOVLGT x y cond) 6527 for { 6528 _ = v.Args[2] 6529 x := v.Args[0] 6530 y := v.Args[1] 6531 v_2 := v.Args[2] 6532 if v_2.Op != OpAMD64InvertFlags { 6533 break 6534 } 6535 cond := v_2.Args[0] 6536 v.reset(OpAMD64CMOVLGT) 6537 v.AddArg(x) 6538 v.AddArg(y) 6539 v.AddArg(cond) 6540 return true 6541 } 6542 // match: (CMOVLLT y _ (FlagEQ)) 6543 // cond: 6544 // result: y 6545 for { 6546 _ = v.Args[2] 6547 y := v.Args[0] 6548 v_2 := v.Args[2] 6549 if v_2.Op != OpAMD64FlagEQ { 6550 break 6551 } 6552 v.reset(OpCopy) 6553 v.Type = y.Type 6554 v.AddArg(y) 6555 return true 6556 } 6557 // match: (CMOVLLT y _ (FlagGT_UGT)) 6558 // cond: 6559 // result: y 6560 for { 6561 _ = v.Args[2] 6562 y := v.Args[0] 6563 v_2 := v.Args[2] 6564 if v_2.Op != OpAMD64FlagGT_UGT { 6565 break 6566 } 6567 v.reset(OpCopy) 6568 v.Type = y.Type 6569 v.AddArg(y) 6570 return true 6571 } 6572 // match: (CMOVLLT y _ (FlagGT_ULT)) 6573 // cond: 6574 // result: y 6575 for { 6576 _ = v.Args[2] 6577 y := v.Args[0] 6578 v_2 := v.Args[2] 6579 if v_2.Op != OpAMD64FlagGT_ULT { 6580 break 6581 } 6582 v.reset(OpCopy) 6583 v.Type = y.Type 6584 v.AddArg(y) 6585 return true 6586 } 6587 // match: (CMOVLLT _ x (FlagLT_ULT)) 6588 // cond: 6589 // result: x 6590 for { 6591 _ = v.Args[2] 6592 x := v.Args[1] 6593 v_2 := v.Args[2] 6594 if v_2.Op != OpAMD64FlagLT_ULT { 6595 break 6596 } 6597 v.reset(OpCopy) 6598 v.Type = x.Type 6599 v.AddArg(x) 6600 return true 6601 } 6602 // match: (CMOVLLT _ x (FlagLT_UGT)) 6603 // cond: 6604 // result: x 6605 for { 6606 _ = v.Args[2] 6607 x := v.Args[1] 6608 v_2 := v.Args[2] 6609 if v_2.Op != OpAMD64FlagLT_UGT { 6610 break 6611 } 6612 v.reset(OpCopy) 6613 v.Type = x.Type 6614 v.AddArg(x) 6615 return true 6616 } 6617 return false 6618 } 6619 func rewriteValueAMD64_OpAMD64CMOVLNE_0(v *Value) bool { 6620 // match: (CMOVLNE x y (InvertFlags cond)) 6621 // cond: 6622 // result: (CMOVLNE x y cond) 6623 for { 6624 _ = v.Args[2] 6625 x := v.Args[0] 6626 y := v.Args[1] 6627 v_2 := v.Args[2] 6628 if v_2.Op != OpAMD64InvertFlags { 6629 break 6630 } 6631 cond := v_2.Args[0] 6632 v.reset(OpAMD64CMOVLNE) 6633 v.AddArg(x) 6634 v.AddArg(y) 6635 v.AddArg(cond) 6636 return true 6637 } 6638 // match: (CMOVLNE y _ (FlagEQ)) 6639 // cond: 6640 // result: y 6641 for { 6642 _ = v.Args[2] 6643 y := v.Args[0] 6644 v_2 := v.Args[2] 6645 if v_2.Op != OpAMD64FlagEQ { 6646 break 6647 } 6648 v.reset(OpCopy) 6649 v.Type = y.Type 6650 v.AddArg(y) 6651 return true 6652 } 6653 // match: (CMOVLNE _ x (FlagGT_UGT)) 6654 // cond: 6655 // result: x 6656 for { 6657 _ = v.Args[2] 6658 x := v.Args[1] 6659 v_2 := v.Args[2] 6660 if v_2.Op != OpAMD64FlagGT_UGT { 6661 break 6662 } 6663 v.reset(OpCopy) 6664 v.Type = x.Type 6665 v.AddArg(x) 6666 return true 6667 } 6668 // match: (CMOVLNE _ x (FlagGT_ULT)) 6669 // cond: 6670 // result: x 6671 for { 6672 _ = v.Args[2] 6673 x := v.Args[1] 6674 v_2 := v.Args[2] 6675 if v_2.Op != OpAMD64FlagGT_ULT { 6676 break 6677 } 6678 v.reset(OpCopy) 6679 v.Type = x.Type 6680 v.AddArg(x) 6681 return true 6682 } 6683 // match: (CMOVLNE _ x 
(FlagLT_ULT)) 6684 // cond: 6685 // result: x 6686 for { 6687 _ = v.Args[2] 6688 x := v.Args[1] 6689 v_2 := v.Args[2] 6690 if v_2.Op != OpAMD64FlagLT_ULT { 6691 break 6692 } 6693 v.reset(OpCopy) 6694 v.Type = x.Type 6695 v.AddArg(x) 6696 return true 6697 } 6698 // match: (CMOVLNE _ x (FlagLT_UGT)) 6699 // cond: 6700 // result: x 6701 for { 6702 _ = v.Args[2] 6703 x := v.Args[1] 6704 v_2 := v.Args[2] 6705 if v_2.Op != OpAMD64FlagLT_UGT { 6706 break 6707 } 6708 v.reset(OpCopy) 6709 v.Type = x.Type 6710 v.AddArg(x) 6711 return true 6712 } 6713 return false 6714 } 6715 func rewriteValueAMD64_OpAMD64CMOVQCC_0(v *Value) bool { 6716 // match: (CMOVQCC x y (InvertFlags cond)) 6717 // cond: 6718 // result: (CMOVQLS x y cond) 6719 for { 6720 _ = v.Args[2] 6721 x := v.Args[0] 6722 y := v.Args[1] 6723 v_2 := v.Args[2] 6724 if v_2.Op != OpAMD64InvertFlags { 6725 break 6726 } 6727 cond := v_2.Args[0] 6728 v.reset(OpAMD64CMOVQLS) 6729 v.AddArg(x) 6730 v.AddArg(y) 6731 v.AddArg(cond) 6732 return true 6733 } 6734 // match: (CMOVQCC _ x (FlagEQ)) 6735 // cond: 6736 // result: x 6737 for { 6738 _ = v.Args[2] 6739 x := v.Args[1] 6740 v_2 := v.Args[2] 6741 if v_2.Op != OpAMD64FlagEQ { 6742 break 6743 } 6744 v.reset(OpCopy) 6745 v.Type = x.Type 6746 v.AddArg(x) 6747 return true 6748 } 6749 // match: (CMOVQCC _ x (FlagGT_UGT)) 6750 // cond: 6751 // result: x 6752 for { 6753 _ = v.Args[2] 6754 x := v.Args[1] 6755 v_2 := v.Args[2] 6756 if v_2.Op != OpAMD64FlagGT_UGT { 6757 break 6758 } 6759 v.reset(OpCopy) 6760 v.Type = x.Type 6761 v.AddArg(x) 6762 return true 6763 } 6764 // match: (CMOVQCC y _ (FlagGT_ULT)) 6765 // cond: 6766 // result: y 6767 for { 6768 _ = v.Args[2] 6769 y := v.Args[0] 6770 v_2 := v.Args[2] 6771 if v_2.Op != OpAMD64FlagGT_ULT { 6772 break 6773 } 6774 v.reset(OpCopy) 6775 v.Type = y.Type 6776 v.AddArg(y) 6777 return true 6778 } 6779 // match: (CMOVQCC y _ (FlagLT_ULT)) 6780 // cond: 6781 // result: y 6782 for { 6783 _ = v.Args[2] 6784 y := v.Args[0] 6785 v_2 := v.Args[2] 6786 if v_2.Op != OpAMD64FlagLT_ULT { 6787 break 6788 } 6789 v.reset(OpCopy) 6790 v.Type = y.Type 6791 v.AddArg(y) 6792 return true 6793 } 6794 // match: (CMOVQCC _ x (FlagLT_UGT)) 6795 // cond: 6796 // result: x 6797 for { 6798 _ = v.Args[2] 6799 x := v.Args[1] 6800 v_2 := v.Args[2] 6801 if v_2.Op != OpAMD64FlagLT_UGT { 6802 break 6803 } 6804 v.reset(OpCopy) 6805 v.Type = x.Type 6806 v.AddArg(x) 6807 return true 6808 } 6809 return false 6810 } 6811 func rewriteValueAMD64_OpAMD64CMOVQCS_0(v *Value) bool { 6812 // match: (CMOVQCS x y (InvertFlags cond)) 6813 // cond: 6814 // result: (CMOVQHI x y cond) 6815 for { 6816 _ = v.Args[2] 6817 x := v.Args[0] 6818 y := v.Args[1] 6819 v_2 := v.Args[2] 6820 if v_2.Op != OpAMD64InvertFlags { 6821 break 6822 } 6823 cond := v_2.Args[0] 6824 v.reset(OpAMD64CMOVQHI) 6825 v.AddArg(x) 6826 v.AddArg(y) 6827 v.AddArg(cond) 6828 return true 6829 } 6830 // match: (CMOVQCS y _ (FlagEQ)) 6831 // cond: 6832 // result: y 6833 for { 6834 _ = v.Args[2] 6835 y := v.Args[0] 6836 v_2 := v.Args[2] 6837 if v_2.Op != OpAMD64FlagEQ { 6838 break 6839 } 6840 v.reset(OpCopy) 6841 v.Type = y.Type 6842 v.AddArg(y) 6843 return true 6844 } 6845 // match: (CMOVQCS y _ (FlagGT_UGT)) 6846 // cond: 6847 // result: y 6848 for { 6849 _ = v.Args[2] 6850 y := v.Args[0] 6851 v_2 := v.Args[2] 6852 if v_2.Op != OpAMD64FlagGT_UGT { 6853 break 6854 } 6855 v.reset(OpCopy) 6856 v.Type = y.Type 6857 v.AddArg(y) 6858 return true 6859 } 6860 // match: (CMOVQCS _ x (FlagGT_ULT)) 6861 // cond: 6862 // result: x 6863 for { 6864 _ = v.Args[2] 
6865 x := v.Args[1] 6866 v_2 := v.Args[2] 6867 if v_2.Op != OpAMD64FlagGT_ULT { 6868 break 6869 } 6870 v.reset(OpCopy) 6871 v.Type = x.Type 6872 v.AddArg(x) 6873 return true 6874 } 6875 // match: (CMOVQCS _ x (FlagLT_ULT)) 6876 // cond: 6877 // result: x 6878 for { 6879 _ = v.Args[2] 6880 x := v.Args[1] 6881 v_2 := v.Args[2] 6882 if v_2.Op != OpAMD64FlagLT_ULT { 6883 break 6884 } 6885 v.reset(OpCopy) 6886 v.Type = x.Type 6887 v.AddArg(x) 6888 return true 6889 } 6890 // match: (CMOVQCS y _ (FlagLT_UGT)) 6891 // cond: 6892 // result: y 6893 for { 6894 _ = v.Args[2] 6895 y := v.Args[0] 6896 v_2 := v.Args[2] 6897 if v_2.Op != OpAMD64FlagLT_UGT { 6898 break 6899 } 6900 v.reset(OpCopy) 6901 v.Type = y.Type 6902 v.AddArg(y) 6903 return true 6904 } 6905 return false 6906 } 6907 func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool { 6908 // match: (CMOVQEQ x y (InvertFlags cond)) 6909 // cond: 6910 // result: (CMOVQEQ x y cond) 6911 for { 6912 _ = v.Args[2] 6913 x := v.Args[0] 6914 y := v.Args[1] 6915 v_2 := v.Args[2] 6916 if v_2.Op != OpAMD64InvertFlags { 6917 break 6918 } 6919 cond := v_2.Args[0] 6920 v.reset(OpAMD64CMOVQEQ) 6921 v.AddArg(x) 6922 v.AddArg(y) 6923 v.AddArg(cond) 6924 return true 6925 } 6926 // match: (CMOVQEQ _ x (FlagEQ)) 6927 // cond: 6928 // result: x 6929 for { 6930 _ = v.Args[2] 6931 x := v.Args[1] 6932 v_2 := v.Args[2] 6933 if v_2.Op != OpAMD64FlagEQ { 6934 break 6935 } 6936 v.reset(OpCopy) 6937 v.Type = x.Type 6938 v.AddArg(x) 6939 return true 6940 } 6941 // match: (CMOVQEQ y _ (FlagGT_UGT)) 6942 // cond: 6943 // result: y 6944 for { 6945 _ = v.Args[2] 6946 y := v.Args[0] 6947 v_2 := v.Args[2] 6948 if v_2.Op != OpAMD64FlagGT_UGT { 6949 break 6950 } 6951 v.reset(OpCopy) 6952 v.Type = y.Type 6953 v.AddArg(y) 6954 return true 6955 } 6956 // match: (CMOVQEQ y _ (FlagGT_ULT)) 6957 // cond: 6958 // result: y 6959 for { 6960 _ = v.Args[2] 6961 y := v.Args[0] 6962 v_2 := v.Args[2] 6963 if v_2.Op != OpAMD64FlagGT_ULT { 6964 break 6965 } 6966 v.reset(OpCopy) 6967 v.Type = y.Type 6968 v.AddArg(y) 6969 return true 6970 } 6971 // match: (CMOVQEQ y _ (FlagLT_ULT)) 6972 // cond: 6973 // result: y 6974 for { 6975 _ = v.Args[2] 6976 y := v.Args[0] 6977 v_2 := v.Args[2] 6978 if v_2.Op != OpAMD64FlagLT_ULT { 6979 break 6980 } 6981 v.reset(OpCopy) 6982 v.Type = y.Type 6983 v.AddArg(y) 6984 return true 6985 } 6986 // match: (CMOVQEQ y _ (FlagLT_UGT)) 6987 // cond: 6988 // result: y 6989 for { 6990 _ = v.Args[2] 6991 y := v.Args[0] 6992 v_2 := v.Args[2] 6993 if v_2.Op != OpAMD64FlagLT_UGT { 6994 break 6995 } 6996 v.reset(OpCopy) 6997 v.Type = y.Type 6998 v.AddArg(y) 6999 return true 7000 } 7001 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) 7002 // cond: c != 0 7003 // result: x 7004 for { 7005 _ = v.Args[2] 7006 x := v.Args[0] 7007 v_2 := v.Args[2] 7008 if v_2.Op != OpSelect1 { 7009 break 7010 } 7011 v_2_0 := v_2.Args[0] 7012 if v_2_0.Op != OpAMD64BSFQ { 7013 break 7014 } 7015 v_2_0_0 := v_2_0.Args[0] 7016 if v_2_0_0.Op != OpAMD64ORQconst { 7017 break 7018 } 7019 c := v_2_0_0.AuxInt 7020 if !(c != 0) { 7021 break 7022 } 7023 v.reset(OpCopy) 7024 v.Type = x.Type 7025 v.AddArg(x) 7026 return true 7027 } 7028 return false 7029 } 7030 func rewriteValueAMD64_OpAMD64CMOVQGE_0(v *Value) bool { 7031 // match: (CMOVQGE x y (InvertFlags cond)) 7032 // cond: 7033 // result: (CMOVQLE x y cond) 7034 for { 7035 _ = v.Args[2] 7036 x := v.Args[0] 7037 y := v.Args[1] 7038 v_2 := v.Args[2] 7039 if v_2.Op != OpAMD64InvertFlags { 7040 break 7041 } 7042 cond := v_2.Args[0] 7043 v.reset(OpAMD64CMOVQLE) 
7044 v.AddArg(x) 7045 v.AddArg(y) 7046 v.AddArg(cond) 7047 return true 7048 } 7049 // match: (CMOVQGE _ x (FlagEQ)) 7050 // cond: 7051 // result: x 7052 for { 7053 _ = v.Args[2] 7054 x := v.Args[1] 7055 v_2 := v.Args[2] 7056 if v_2.Op != OpAMD64FlagEQ { 7057 break 7058 } 7059 v.reset(OpCopy) 7060 v.Type = x.Type 7061 v.AddArg(x) 7062 return true 7063 } 7064 // match: (CMOVQGE _ x (FlagGT_UGT)) 7065 // cond: 7066 // result: x 7067 for { 7068 _ = v.Args[2] 7069 x := v.Args[1] 7070 v_2 := v.Args[2] 7071 if v_2.Op != OpAMD64FlagGT_UGT { 7072 break 7073 } 7074 v.reset(OpCopy) 7075 v.Type = x.Type 7076 v.AddArg(x) 7077 return true 7078 } 7079 // match: (CMOVQGE _ x (FlagGT_ULT)) 7080 // cond: 7081 // result: x 7082 for { 7083 _ = v.Args[2] 7084 x := v.Args[1] 7085 v_2 := v.Args[2] 7086 if v_2.Op != OpAMD64FlagGT_ULT { 7087 break 7088 } 7089 v.reset(OpCopy) 7090 v.Type = x.Type 7091 v.AddArg(x) 7092 return true 7093 } 7094 // match: (CMOVQGE y _ (FlagLT_ULT)) 7095 // cond: 7096 // result: y 7097 for { 7098 _ = v.Args[2] 7099 y := v.Args[0] 7100 v_2 := v.Args[2] 7101 if v_2.Op != OpAMD64FlagLT_ULT { 7102 break 7103 } 7104 v.reset(OpCopy) 7105 v.Type = y.Type 7106 v.AddArg(y) 7107 return true 7108 } 7109 // match: (CMOVQGE y _ (FlagLT_UGT)) 7110 // cond: 7111 // result: y 7112 for { 7113 _ = v.Args[2] 7114 y := v.Args[0] 7115 v_2 := v.Args[2] 7116 if v_2.Op != OpAMD64FlagLT_UGT { 7117 break 7118 } 7119 v.reset(OpCopy) 7120 v.Type = y.Type 7121 v.AddArg(y) 7122 return true 7123 } 7124 return false 7125 } 7126 func rewriteValueAMD64_OpAMD64CMOVQGT_0(v *Value) bool { 7127 // match: (CMOVQGT x y (InvertFlags cond)) 7128 // cond: 7129 // result: (CMOVQLT x y cond) 7130 for { 7131 _ = v.Args[2] 7132 x := v.Args[0] 7133 y := v.Args[1] 7134 v_2 := v.Args[2] 7135 if v_2.Op != OpAMD64InvertFlags { 7136 break 7137 } 7138 cond := v_2.Args[0] 7139 v.reset(OpAMD64CMOVQLT) 7140 v.AddArg(x) 7141 v.AddArg(y) 7142 v.AddArg(cond) 7143 return true 7144 } 7145 // match: (CMOVQGT y _ (FlagEQ)) 7146 // cond: 7147 // result: y 7148 for { 7149 _ = v.Args[2] 7150 y := v.Args[0] 7151 v_2 := v.Args[2] 7152 if v_2.Op != OpAMD64FlagEQ { 7153 break 7154 } 7155 v.reset(OpCopy) 7156 v.Type = y.Type 7157 v.AddArg(y) 7158 return true 7159 } 7160 // match: (CMOVQGT _ x (FlagGT_UGT)) 7161 // cond: 7162 // result: x 7163 for { 7164 _ = v.Args[2] 7165 x := v.Args[1] 7166 v_2 := v.Args[2] 7167 if v_2.Op != OpAMD64FlagGT_UGT { 7168 break 7169 } 7170 v.reset(OpCopy) 7171 v.Type = x.Type 7172 v.AddArg(x) 7173 return true 7174 } 7175 // match: (CMOVQGT _ x (FlagGT_ULT)) 7176 // cond: 7177 // result: x 7178 for { 7179 _ = v.Args[2] 7180 x := v.Args[1] 7181 v_2 := v.Args[2] 7182 if v_2.Op != OpAMD64FlagGT_ULT { 7183 break 7184 } 7185 v.reset(OpCopy) 7186 v.Type = x.Type 7187 v.AddArg(x) 7188 return true 7189 } 7190 // match: (CMOVQGT y _ (FlagLT_ULT)) 7191 // cond: 7192 // result: y 7193 for { 7194 _ = v.Args[2] 7195 y := v.Args[0] 7196 v_2 := v.Args[2] 7197 if v_2.Op != OpAMD64FlagLT_ULT { 7198 break 7199 } 7200 v.reset(OpCopy) 7201 v.Type = y.Type 7202 v.AddArg(y) 7203 return true 7204 } 7205 // match: (CMOVQGT y _ (FlagLT_UGT)) 7206 // cond: 7207 // result: y 7208 for { 7209 _ = v.Args[2] 7210 y := v.Args[0] 7211 v_2 := v.Args[2] 7212 if v_2.Op != OpAMD64FlagLT_UGT { 7213 break 7214 } 7215 v.reset(OpCopy) 7216 v.Type = y.Type 7217 v.AddArg(y) 7218 return true 7219 } 7220 return false 7221 } 7222 func rewriteValueAMD64_OpAMD64CMOVQHI_0(v *Value) bool { 7223 // match: (CMOVQHI x y (InvertFlags cond)) 7224 // cond: 7225 // result: (CMOVQCS x y 
cond) 7226 for { 7227 _ = v.Args[2] 7228 x := v.Args[0] 7229 y := v.Args[1] 7230 v_2 := v.Args[2] 7231 if v_2.Op != OpAMD64InvertFlags { 7232 break 7233 } 7234 cond := v_2.Args[0] 7235 v.reset(OpAMD64CMOVQCS) 7236 v.AddArg(x) 7237 v.AddArg(y) 7238 v.AddArg(cond) 7239 return true 7240 } 7241 // match: (CMOVQHI y _ (FlagEQ)) 7242 // cond: 7243 // result: y 7244 for { 7245 _ = v.Args[2] 7246 y := v.Args[0] 7247 v_2 := v.Args[2] 7248 if v_2.Op != OpAMD64FlagEQ { 7249 break 7250 } 7251 v.reset(OpCopy) 7252 v.Type = y.Type 7253 v.AddArg(y) 7254 return true 7255 } 7256 // match: (CMOVQHI _ x (FlagGT_UGT)) 7257 // cond: 7258 // result: x 7259 for { 7260 _ = v.Args[2] 7261 x := v.Args[1] 7262 v_2 := v.Args[2] 7263 if v_2.Op != OpAMD64FlagGT_UGT { 7264 break 7265 } 7266 v.reset(OpCopy) 7267 v.Type = x.Type 7268 v.AddArg(x) 7269 return true 7270 } 7271 // match: (CMOVQHI y _ (FlagGT_ULT)) 7272 // cond: 7273 // result: y 7274 for { 7275 _ = v.Args[2] 7276 y := v.Args[0] 7277 v_2 := v.Args[2] 7278 if v_2.Op != OpAMD64FlagGT_ULT { 7279 break 7280 } 7281 v.reset(OpCopy) 7282 v.Type = y.Type 7283 v.AddArg(y) 7284 return true 7285 } 7286 // match: (CMOVQHI y _ (FlagLT_ULT)) 7287 // cond: 7288 // result: y 7289 for { 7290 _ = v.Args[2] 7291 y := v.Args[0] 7292 v_2 := v.Args[2] 7293 if v_2.Op != OpAMD64FlagLT_ULT { 7294 break 7295 } 7296 v.reset(OpCopy) 7297 v.Type = y.Type 7298 v.AddArg(y) 7299 return true 7300 } 7301 // match: (CMOVQHI _ x (FlagLT_UGT)) 7302 // cond: 7303 // result: x 7304 for { 7305 _ = v.Args[2] 7306 x := v.Args[1] 7307 v_2 := v.Args[2] 7308 if v_2.Op != OpAMD64FlagLT_UGT { 7309 break 7310 } 7311 v.reset(OpCopy) 7312 v.Type = x.Type 7313 v.AddArg(x) 7314 return true 7315 } 7316 return false 7317 } 7318 func rewriteValueAMD64_OpAMD64CMOVQLE_0(v *Value) bool { 7319 // match: (CMOVQLE x y (InvertFlags cond)) 7320 // cond: 7321 // result: (CMOVQGE x y cond) 7322 for { 7323 _ = v.Args[2] 7324 x := v.Args[0] 7325 y := v.Args[1] 7326 v_2 := v.Args[2] 7327 if v_2.Op != OpAMD64InvertFlags { 7328 break 7329 } 7330 cond := v_2.Args[0] 7331 v.reset(OpAMD64CMOVQGE) 7332 v.AddArg(x) 7333 v.AddArg(y) 7334 v.AddArg(cond) 7335 return true 7336 } 7337 // match: (CMOVQLE _ x (FlagEQ)) 7338 // cond: 7339 // result: x 7340 for { 7341 _ = v.Args[2] 7342 x := v.Args[1] 7343 v_2 := v.Args[2] 7344 if v_2.Op != OpAMD64FlagEQ { 7345 break 7346 } 7347 v.reset(OpCopy) 7348 v.Type = x.Type 7349 v.AddArg(x) 7350 return true 7351 } 7352 // match: (CMOVQLE y _ (FlagGT_UGT)) 7353 // cond: 7354 // result: y 7355 for { 7356 _ = v.Args[2] 7357 y := v.Args[0] 7358 v_2 := v.Args[2] 7359 if v_2.Op != OpAMD64FlagGT_UGT { 7360 break 7361 } 7362 v.reset(OpCopy) 7363 v.Type = y.Type 7364 v.AddArg(y) 7365 return true 7366 } 7367 // match: (CMOVQLE y _ (FlagGT_ULT)) 7368 // cond: 7369 // result: y 7370 for { 7371 _ = v.Args[2] 7372 y := v.Args[0] 7373 v_2 := v.Args[2] 7374 if v_2.Op != OpAMD64FlagGT_ULT { 7375 break 7376 } 7377 v.reset(OpCopy) 7378 v.Type = y.Type 7379 v.AddArg(y) 7380 return true 7381 } 7382 // match: (CMOVQLE _ x (FlagLT_ULT)) 7383 // cond: 7384 // result: x 7385 for { 7386 _ = v.Args[2] 7387 x := v.Args[1] 7388 v_2 := v.Args[2] 7389 if v_2.Op != OpAMD64FlagLT_ULT { 7390 break 7391 } 7392 v.reset(OpCopy) 7393 v.Type = x.Type 7394 v.AddArg(x) 7395 return true 7396 } 7397 // match: (CMOVQLE _ x (FlagLT_UGT)) 7398 // cond: 7399 // result: x 7400 for { 7401 _ = v.Args[2] 7402 x := v.Args[1] 7403 v_2 := v.Args[2] 7404 if v_2.Op != OpAMD64FlagLT_UGT { 7405 break 7406 } 7407 v.reset(OpCopy) 7408 v.Type = x.Type 7409 
v.AddArg(x) 7410 return true 7411 } 7412 return false 7413 } 7414 func rewriteValueAMD64_OpAMD64CMOVQLS_0(v *Value) bool { 7415 // match: (CMOVQLS x y (InvertFlags cond)) 7416 // cond: 7417 // result: (CMOVQCC x y cond) 7418 for { 7419 _ = v.Args[2] 7420 x := v.Args[0] 7421 y := v.Args[1] 7422 v_2 := v.Args[2] 7423 if v_2.Op != OpAMD64InvertFlags { 7424 break 7425 } 7426 cond := v_2.Args[0] 7427 v.reset(OpAMD64CMOVQCC) 7428 v.AddArg(x) 7429 v.AddArg(y) 7430 v.AddArg(cond) 7431 return true 7432 } 7433 // match: (CMOVQLS _ x (FlagEQ)) 7434 // cond: 7435 // result: x 7436 for { 7437 _ = v.Args[2] 7438 x := v.Args[1] 7439 v_2 := v.Args[2] 7440 if v_2.Op != OpAMD64FlagEQ { 7441 break 7442 } 7443 v.reset(OpCopy) 7444 v.Type = x.Type 7445 v.AddArg(x) 7446 return true 7447 } 7448 // match: (CMOVQLS y _ (FlagGT_UGT)) 7449 // cond: 7450 // result: y 7451 for { 7452 _ = v.Args[2] 7453 y := v.Args[0] 7454 v_2 := v.Args[2] 7455 if v_2.Op != OpAMD64FlagGT_UGT { 7456 break 7457 } 7458 v.reset(OpCopy) 7459 v.Type = y.Type 7460 v.AddArg(y) 7461 return true 7462 } 7463 // match: (CMOVQLS _ x (FlagGT_ULT)) 7464 // cond: 7465 // result: x 7466 for { 7467 _ = v.Args[2] 7468 x := v.Args[1] 7469 v_2 := v.Args[2] 7470 if v_2.Op != OpAMD64FlagGT_ULT { 7471 break 7472 } 7473 v.reset(OpCopy) 7474 v.Type = x.Type 7475 v.AddArg(x) 7476 return true 7477 } 7478 // match: (CMOVQLS _ x (FlagLT_ULT)) 7479 // cond: 7480 // result: x 7481 for { 7482 _ = v.Args[2] 7483 x := v.Args[1] 7484 v_2 := v.Args[2] 7485 if v_2.Op != OpAMD64FlagLT_ULT { 7486 break 7487 } 7488 v.reset(OpCopy) 7489 v.Type = x.Type 7490 v.AddArg(x) 7491 return true 7492 } 7493 // match: (CMOVQLS y _ (FlagLT_UGT)) 7494 // cond: 7495 // result: y 7496 for { 7497 _ = v.Args[2] 7498 y := v.Args[0] 7499 v_2 := v.Args[2] 7500 if v_2.Op != OpAMD64FlagLT_UGT { 7501 break 7502 } 7503 v.reset(OpCopy) 7504 v.Type = y.Type 7505 v.AddArg(y) 7506 return true 7507 } 7508 return false 7509 } 7510 func rewriteValueAMD64_OpAMD64CMOVQLT_0(v *Value) bool { 7511 // match: (CMOVQLT x y (InvertFlags cond)) 7512 // cond: 7513 // result: (CMOVQGT x y cond) 7514 for { 7515 _ = v.Args[2] 7516 x := v.Args[0] 7517 y := v.Args[1] 7518 v_2 := v.Args[2] 7519 if v_2.Op != OpAMD64InvertFlags { 7520 break 7521 } 7522 cond := v_2.Args[0] 7523 v.reset(OpAMD64CMOVQGT) 7524 v.AddArg(x) 7525 v.AddArg(y) 7526 v.AddArg(cond) 7527 return true 7528 } 7529 // match: (CMOVQLT y _ (FlagEQ)) 7530 // cond: 7531 // result: y 7532 for { 7533 _ = v.Args[2] 7534 y := v.Args[0] 7535 v_2 := v.Args[2] 7536 if v_2.Op != OpAMD64FlagEQ { 7537 break 7538 } 7539 v.reset(OpCopy) 7540 v.Type = y.Type 7541 v.AddArg(y) 7542 return true 7543 } 7544 // match: (CMOVQLT y _ (FlagGT_UGT)) 7545 // cond: 7546 // result: y 7547 for { 7548 _ = v.Args[2] 7549 y := v.Args[0] 7550 v_2 := v.Args[2] 7551 if v_2.Op != OpAMD64FlagGT_UGT { 7552 break 7553 } 7554 v.reset(OpCopy) 7555 v.Type = y.Type 7556 v.AddArg(y) 7557 return true 7558 } 7559 // match: (CMOVQLT y _ (FlagGT_ULT)) 7560 // cond: 7561 // result: y 7562 for { 7563 _ = v.Args[2] 7564 y := v.Args[0] 7565 v_2 := v.Args[2] 7566 if v_2.Op != OpAMD64FlagGT_ULT { 7567 break 7568 } 7569 v.reset(OpCopy) 7570 v.Type = y.Type 7571 v.AddArg(y) 7572 return true 7573 } 7574 // match: (CMOVQLT _ x (FlagLT_ULT)) 7575 // cond: 7576 // result: x 7577 for { 7578 _ = v.Args[2] 7579 x := v.Args[1] 7580 v_2 := v.Args[2] 7581 if v_2.Op != OpAMD64FlagLT_ULT { 7582 break 7583 } 7584 v.reset(OpCopy) 7585 v.Type = x.Type 7586 v.AddArg(x) 7587 return true 7588 } 7589 // match: (CMOVQLT _ x 
(FlagLT_UGT)) 7590 // cond: 7591 // result: x 7592 for { 7593 _ = v.Args[2] 7594 x := v.Args[1] 7595 v_2 := v.Args[2] 7596 if v_2.Op != OpAMD64FlagLT_UGT { 7597 break 7598 } 7599 v.reset(OpCopy) 7600 v.Type = x.Type 7601 v.AddArg(x) 7602 return true 7603 } 7604 return false 7605 } 7606 func rewriteValueAMD64_OpAMD64CMOVQNE_0(v *Value) bool { 7607 // match: (CMOVQNE x y (InvertFlags cond)) 7608 // cond: 7609 // result: (CMOVQNE x y cond) 7610 for { 7611 _ = v.Args[2] 7612 x := v.Args[0] 7613 y := v.Args[1] 7614 v_2 := v.Args[2] 7615 if v_2.Op != OpAMD64InvertFlags { 7616 break 7617 } 7618 cond := v_2.Args[0] 7619 v.reset(OpAMD64CMOVQNE) 7620 v.AddArg(x) 7621 v.AddArg(y) 7622 v.AddArg(cond) 7623 return true 7624 } 7625 // match: (CMOVQNE y _ (FlagEQ)) 7626 // cond: 7627 // result: y 7628 for { 7629 _ = v.Args[2] 7630 y := v.Args[0] 7631 v_2 := v.Args[2] 7632 if v_2.Op != OpAMD64FlagEQ { 7633 break 7634 } 7635 v.reset(OpCopy) 7636 v.Type = y.Type 7637 v.AddArg(y) 7638 return true 7639 } 7640 // match: (CMOVQNE _ x (FlagGT_UGT)) 7641 // cond: 7642 // result: x 7643 for { 7644 _ = v.Args[2] 7645 x := v.Args[1] 7646 v_2 := v.Args[2] 7647 if v_2.Op != OpAMD64FlagGT_UGT { 7648 break 7649 } 7650 v.reset(OpCopy) 7651 v.Type = x.Type 7652 v.AddArg(x) 7653 return true 7654 } 7655 // match: (CMOVQNE _ x (FlagGT_ULT)) 7656 // cond: 7657 // result: x 7658 for { 7659 _ = v.Args[2] 7660 x := v.Args[1] 7661 v_2 := v.Args[2] 7662 if v_2.Op != OpAMD64FlagGT_ULT { 7663 break 7664 } 7665 v.reset(OpCopy) 7666 v.Type = x.Type 7667 v.AddArg(x) 7668 return true 7669 } 7670 // match: (CMOVQNE _ x (FlagLT_ULT)) 7671 // cond: 7672 // result: x 7673 for { 7674 _ = v.Args[2] 7675 x := v.Args[1] 7676 v_2 := v.Args[2] 7677 if v_2.Op != OpAMD64FlagLT_ULT { 7678 break 7679 } 7680 v.reset(OpCopy) 7681 v.Type = x.Type 7682 v.AddArg(x) 7683 return true 7684 } 7685 // match: (CMOVQNE _ x (FlagLT_UGT)) 7686 // cond: 7687 // result: x 7688 for { 7689 _ = v.Args[2] 7690 x := v.Args[1] 7691 v_2 := v.Args[2] 7692 if v_2.Op != OpAMD64FlagLT_UGT { 7693 break 7694 } 7695 v.reset(OpCopy) 7696 v.Type = x.Type 7697 v.AddArg(x) 7698 return true 7699 } 7700 return false 7701 } 7702 func rewriteValueAMD64_OpAMD64CMOVWCC_0(v *Value) bool { 7703 // match: (CMOVWCC x y (InvertFlags cond)) 7704 // cond: 7705 // result: (CMOVWLS x y cond) 7706 for { 7707 _ = v.Args[2] 7708 x := v.Args[0] 7709 y := v.Args[1] 7710 v_2 := v.Args[2] 7711 if v_2.Op != OpAMD64InvertFlags { 7712 break 7713 } 7714 cond := v_2.Args[0] 7715 v.reset(OpAMD64CMOVWLS) 7716 v.AddArg(x) 7717 v.AddArg(y) 7718 v.AddArg(cond) 7719 return true 7720 } 7721 // match: (CMOVWCC _ x (FlagEQ)) 7722 // cond: 7723 // result: x 7724 for { 7725 _ = v.Args[2] 7726 x := v.Args[1] 7727 v_2 := v.Args[2] 7728 if v_2.Op != OpAMD64FlagEQ { 7729 break 7730 } 7731 v.reset(OpCopy) 7732 v.Type = x.Type 7733 v.AddArg(x) 7734 return true 7735 } 7736 // match: (CMOVWCC _ x (FlagGT_UGT)) 7737 // cond: 7738 // result: x 7739 for { 7740 _ = v.Args[2] 7741 x := v.Args[1] 7742 v_2 := v.Args[2] 7743 if v_2.Op != OpAMD64FlagGT_UGT { 7744 break 7745 } 7746 v.reset(OpCopy) 7747 v.Type = x.Type 7748 v.AddArg(x) 7749 return true 7750 } 7751 // match: (CMOVWCC y _ (FlagGT_ULT)) 7752 // cond: 7753 // result: y 7754 for { 7755 _ = v.Args[2] 7756 y := v.Args[0] 7757 v_2 := v.Args[2] 7758 if v_2.Op != OpAMD64FlagGT_ULT { 7759 break 7760 } 7761 v.reset(OpCopy) 7762 v.Type = y.Type 7763 v.AddArg(y) 7764 return true 7765 } 7766 // match: (CMOVWCC y _ (FlagLT_ULT)) 7767 // cond: 7768 // result: y 7769 for { 7770 _ = v.Args[2] 
7771 y := v.Args[0] 7772 v_2 := v.Args[2] 7773 if v_2.Op != OpAMD64FlagLT_ULT { 7774 break 7775 } 7776 v.reset(OpCopy) 7777 v.Type = y.Type 7778 v.AddArg(y) 7779 return true 7780 } 7781 // match: (CMOVWCC _ x (FlagLT_UGT)) 7782 // cond: 7783 // result: x 7784 for { 7785 _ = v.Args[2] 7786 x := v.Args[1] 7787 v_2 := v.Args[2] 7788 if v_2.Op != OpAMD64FlagLT_UGT { 7789 break 7790 } 7791 v.reset(OpCopy) 7792 v.Type = x.Type 7793 v.AddArg(x) 7794 return true 7795 } 7796 return false 7797 } 7798 func rewriteValueAMD64_OpAMD64CMOVWCS_0(v *Value) bool { 7799 // match: (CMOVWCS x y (InvertFlags cond)) 7800 // cond: 7801 // result: (CMOVWHI x y cond) 7802 for { 7803 _ = v.Args[2] 7804 x := v.Args[0] 7805 y := v.Args[1] 7806 v_2 := v.Args[2] 7807 if v_2.Op != OpAMD64InvertFlags { 7808 break 7809 } 7810 cond := v_2.Args[0] 7811 v.reset(OpAMD64CMOVWHI) 7812 v.AddArg(x) 7813 v.AddArg(y) 7814 v.AddArg(cond) 7815 return true 7816 } 7817 // match: (CMOVWCS y _ (FlagEQ)) 7818 // cond: 7819 // result: y 7820 for { 7821 _ = v.Args[2] 7822 y := v.Args[0] 7823 v_2 := v.Args[2] 7824 if v_2.Op != OpAMD64FlagEQ { 7825 break 7826 } 7827 v.reset(OpCopy) 7828 v.Type = y.Type 7829 v.AddArg(y) 7830 return true 7831 } 7832 // match: (CMOVWCS y _ (FlagGT_UGT)) 7833 // cond: 7834 // result: y 7835 for { 7836 _ = v.Args[2] 7837 y := v.Args[0] 7838 v_2 := v.Args[2] 7839 if v_2.Op != OpAMD64FlagGT_UGT { 7840 break 7841 } 7842 v.reset(OpCopy) 7843 v.Type = y.Type 7844 v.AddArg(y) 7845 return true 7846 } 7847 // match: (CMOVWCS _ x (FlagGT_ULT)) 7848 // cond: 7849 // result: x 7850 for { 7851 _ = v.Args[2] 7852 x := v.Args[1] 7853 v_2 := v.Args[2] 7854 if v_2.Op != OpAMD64FlagGT_ULT { 7855 break 7856 } 7857 v.reset(OpCopy) 7858 v.Type = x.Type 7859 v.AddArg(x) 7860 return true 7861 } 7862 // match: (CMOVWCS _ x (FlagLT_ULT)) 7863 // cond: 7864 // result: x 7865 for { 7866 _ = v.Args[2] 7867 x := v.Args[1] 7868 v_2 := v.Args[2] 7869 if v_2.Op != OpAMD64FlagLT_ULT { 7870 break 7871 } 7872 v.reset(OpCopy) 7873 v.Type = x.Type 7874 v.AddArg(x) 7875 return true 7876 } 7877 // match: (CMOVWCS y _ (FlagLT_UGT)) 7878 // cond: 7879 // result: y 7880 for { 7881 _ = v.Args[2] 7882 y := v.Args[0] 7883 v_2 := v.Args[2] 7884 if v_2.Op != OpAMD64FlagLT_UGT { 7885 break 7886 } 7887 v.reset(OpCopy) 7888 v.Type = y.Type 7889 v.AddArg(y) 7890 return true 7891 } 7892 return false 7893 } 7894 func rewriteValueAMD64_OpAMD64CMOVWEQ_0(v *Value) bool { 7895 // match: (CMOVWEQ x y (InvertFlags cond)) 7896 // cond: 7897 // result: (CMOVWEQ x y cond) 7898 for { 7899 _ = v.Args[2] 7900 x := v.Args[0] 7901 y := v.Args[1] 7902 v_2 := v.Args[2] 7903 if v_2.Op != OpAMD64InvertFlags { 7904 break 7905 } 7906 cond := v_2.Args[0] 7907 v.reset(OpAMD64CMOVWEQ) 7908 v.AddArg(x) 7909 v.AddArg(y) 7910 v.AddArg(cond) 7911 return true 7912 } 7913 // match: (CMOVWEQ _ x (FlagEQ)) 7914 // cond: 7915 // result: x 7916 for { 7917 _ = v.Args[2] 7918 x := v.Args[1] 7919 v_2 := v.Args[2] 7920 if v_2.Op != OpAMD64FlagEQ { 7921 break 7922 } 7923 v.reset(OpCopy) 7924 v.Type = x.Type 7925 v.AddArg(x) 7926 return true 7927 } 7928 // match: (CMOVWEQ y _ (FlagGT_UGT)) 7929 // cond: 7930 // result: y 7931 for { 7932 _ = v.Args[2] 7933 y := v.Args[0] 7934 v_2 := v.Args[2] 7935 if v_2.Op != OpAMD64FlagGT_UGT { 7936 break 7937 } 7938 v.reset(OpCopy) 7939 v.Type = y.Type 7940 v.AddArg(y) 7941 return true 7942 } 7943 // match: (CMOVWEQ y _ (FlagGT_ULT)) 7944 // cond: 7945 // result: y 7946 for { 7947 _ = v.Args[2] 7948 y := v.Args[0] 7949 v_2 := v.Args[2] 7950 if v_2.Op != 
OpAMD64FlagGT_ULT { 7951 break 7952 } 7953 v.reset(OpCopy) 7954 v.Type = y.Type 7955 v.AddArg(y) 7956 return true 7957 } 7958 // match: (CMOVWEQ y _ (FlagLT_ULT)) 7959 // cond: 7960 // result: y 7961 for { 7962 _ = v.Args[2] 7963 y := v.Args[0] 7964 v_2 := v.Args[2] 7965 if v_2.Op != OpAMD64FlagLT_ULT { 7966 break 7967 } 7968 v.reset(OpCopy) 7969 v.Type = y.Type 7970 v.AddArg(y) 7971 return true 7972 } 7973 // match: (CMOVWEQ y _ (FlagLT_UGT)) 7974 // cond: 7975 // result: y 7976 for { 7977 _ = v.Args[2] 7978 y := v.Args[0] 7979 v_2 := v.Args[2] 7980 if v_2.Op != OpAMD64FlagLT_UGT { 7981 break 7982 } 7983 v.reset(OpCopy) 7984 v.Type = y.Type 7985 v.AddArg(y) 7986 return true 7987 } 7988 return false 7989 } 7990 func rewriteValueAMD64_OpAMD64CMOVWGE_0(v *Value) bool { 7991 // match: (CMOVWGE x y (InvertFlags cond)) 7992 // cond: 7993 // result: (CMOVWLE x y cond) 7994 for { 7995 _ = v.Args[2] 7996 x := v.Args[0] 7997 y := v.Args[1] 7998 v_2 := v.Args[2] 7999 if v_2.Op != OpAMD64InvertFlags { 8000 break 8001 } 8002 cond := v_2.Args[0] 8003 v.reset(OpAMD64CMOVWLE) 8004 v.AddArg(x) 8005 v.AddArg(y) 8006 v.AddArg(cond) 8007 return true 8008 } 8009 // match: (CMOVWGE _ x (FlagEQ)) 8010 // cond: 8011 // result: x 8012 for { 8013 _ = v.Args[2] 8014 x := v.Args[1] 8015 v_2 := v.Args[2] 8016 if v_2.Op != OpAMD64FlagEQ { 8017 break 8018 } 8019 v.reset(OpCopy) 8020 v.Type = x.Type 8021 v.AddArg(x) 8022 return true 8023 } 8024 // match: (CMOVWGE _ x (FlagGT_UGT)) 8025 // cond: 8026 // result: x 8027 for { 8028 _ = v.Args[2] 8029 x := v.Args[1] 8030 v_2 := v.Args[2] 8031 if v_2.Op != OpAMD64FlagGT_UGT { 8032 break 8033 } 8034 v.reset(OpCopy) 8035 v.Type = x.Type 8036 v.AddArg(x) 8037 return true 8038 } 8039 // match: (CMOVWGE _ x (FlagGT_ULT)) 8040 // cond: 8041 // result: x 8042 for { 8043 _ = v.Args[2] 8044 x := v.Args[1] 8045 v_2 := v.Args[2] 8046 if v_2.Op != OpAMD64FlagGT_ULT { 8047 break 8048 } 8049 v.reset(OpCopy) 8050 v.Type = x.Type 8051 v.AddArg(x) 8052 return true 8053 } 8054 // match: (CMOVWGE y _ (FlagLT_ULT)) 8055 // cond: 8056 // result: y 8057 for { 8058 _ = v.Args[2] 8059 y := v.Args[0] 8060 v_2 := v.Args[2] 8061 if v_2.Op != OpAMD64FlagLT_ULT { 8062 break 8063 } 8064 v.reset(OpCopy) 8065 v.Type = y.Type 8066 v.AddArg(y) 8067 return true 8068 } 8069 // match: (CMOVWGE y _ (FlagLT_UGT)) 8070 // cond: 8071 // result: y 8072 for { 8073 _ = v.Args[2] 8074 y := v.Args[0] 8075 v_2 := v.Args[2] 8076 if v_2.Op != OpAMD64FlagLT_UGT { 8077 break 8078 } 8079 v.reset(OpCopy) 8080 v.Type = y.Type 8081 v.AddArg(y) 8082 return true 8083 } 8084 return false 8085 } 8086 func rewriteValueAMD64_OpAMD64CMOVWGT_0(v *Value) bool { 8087 // match: (CMOVWGT x y (InvertFlags cond)) 8088 // cond: 8089 // result: (CMOVWLT x y cond) 8090 for { 8091 _ = v.Args[2] 8092 x := v.Args[0] 8093 y := v.Args[1] 8094 v_2 := v.Args[2] 8095 if v_2.Op != OpAMD64InvertFlags { 8096 break 8097 } 8098 cond := v_2.Args[0] 8099 v.reset(OpAMD64CMOVWLT) 8100 v.AddArg(x) 8101 v.AddArg(y) 8102 v.AddArg(cond) 8103 return true 8104 } 8105 // match: (CMOVWGT y _ (FlagEQ)) 8106 // cond: 8107 // result: y 8108 for { 8109 _ = v.Args[2] 8110 y := v.Args[0] 8111 v_2 := v.Args[2] 8112 if v_2.Op != OpAMD64FlagEQ { 8113 break 8114 } 8115 v.reset(OpCopy) 8116 v.Type = y.Type 8117 v.AddArg(y) 8118 return true 8119 } 8120 // match: (CMOVWGT _ x (FlagGT_UGT)) 8121 // cond: 8122 // result: x 8123 for { 8124 _ = v.Args[2] 8125 x := v.Args[1] 8126 v_2 := v.Args[2] 8127 if v_2.Op != OpAMD64FlagGT_UGT { 8128 break 8129 } 8130 v.reset(OpCopy) 8131 v.Type = 
x.Type 8132 v.AddArg(x) 8133 return true 8134 } 8135 // match: (CMOVWGT _ x (FlagGT_ULT)) 8136 // cond: 8137 // result: x 8138 for { 8139 _ = v.Args[2] 8140 x := v.Args[1] 8141 v_2 := v.Args[2] 8142 if v_2.Op != OpAMD64FlagGT_ULT { 8143 break 8144 } 8145 v.reset(OpCopy) 8146 v.Type = x.Type 8147 v.AddArg(x) 8148 return true 8149 } 8150 // match: (CMOVWGT y _ (FlagLT_ULT)) 8151 // cond: 8152 // result: y 8153 for { 8154 _ = v.Args[2] 8155 y := v.Args[0] 8156 v_2 := v.Args[2] 8157 if v_2.Op != OpAMD64FlagLT_ULT { 8158 break 8159 } 8160 v.reset(OpCopy) 8161 v.Type = y.Type 8162 v.AddArg(y) 8163 return true 8164 } 8165 // match: (CMOVWGT y _ (FlagLT_UGT)) 8166 // cond: 8167 // result: y 8168 for { 8169 _ = v.Args[2] 8170 y := v.Args[0] 8171 v_2 := v.Args[2] 8172 if v_2.Op != OpAMD64FlagLT_UGT { 8173 break 8174 } 8175 v.reset(OpCopy) 8176 v.Type = y.Type 8177 v.AddArg(y) 8178 return true 8179 } 8180 return false 8181 } 8182 func rewriteValueAMD64_OpAMD64CMOVWHI_0(v *Value) bool { 8183 // match: (CMOVWHI x y (InvertFlags cond)) 8184 // cond: 8185 // result: (CMOVWCS x y cond) 8186 for { 8187 _ = v.Args[2] 8188 x := v.Args[0] 8189 y := v.Args[1] 8190 v_2 := v.Args[2] 8191 if v_2.Op != OpAMD64InvertFlags { 8192 break 8193 } 8194 cond := v_2.Args[0] 8195 v.reset(OpAMD64CMOVWCS) 8196 v.AddArg(x) 8197 v.AddArg(y) 8198 v.AddArg(cond) 8199 return true 8200 } 8201 // match: (CMOVWHI y _ (FlagEQ)) 8202 // cond: 8203 // result: y 8204 for { 8205 _ = v.Args[2] 8206 y := v.Args[0] 8207 v_2 := v.Args[2] 8208 if v_2.Op != OpAMD64FlagEQ { 8209 break 8210 } 8211 v.reset(OpCopy) 8212 v.Type = y.Type 8213 v.AddArg(y) 8214 return true 8215 } 8216 // match: (CMOVWHI _ x (FlagGT_UGT)) 8217 // cond: 8218 // result: x 8219 for { 8220 _ = v.Args[2] 8221 x := v.Args[1] 8222 v_2 := v.Args[2] 8223 if v_2.Op != OpAMD64FlagGT_UGT { 8224 break 8225 } 8226 v.reset(OpCopy) 8227 v.Type = x.Type 8228 v.AddArg(x) 8229 return true 8230 } 8231 // match: (CMOVWHI y _ (FlagGT_ULT)) 8232 // cond: 8233 // result: y 8234 for { 8235 _ = v.Args[2] 8236 y := v.Args[0] 8237 v_2 := v.Args[2] 8238 if v_2.Op != OpAMD64FlagGT_ULT { 8239 break 8240 } 8241 v.reset(OpCopy) 8242 v.Type = y.Type 8243 v.AddArg(y) 8244 return true 8245 } 8246 // match: (CMOVWHI y _ (FlagLT_ULT)) 8247 // cond: 8248 // result: y 8249 for { 8250 _ = v.Args[2] 8251 y := v.Args[0] 8252 v_2 := v.Args[2] 8253 if v_2.Op != OpAMD64FlagLT_ULT { 8254 break 8255 } 8256 v.reset(OpCopy) 8257 v.Type = y.Type 8258 v.AddArg(y) 8259 return true 8260 } 8261 // match: (CMOVWHI _ x (FlagLT_UGT)) 8262 // cond: 8263 // result: x 8264 for { 8265 _ = v.Args[2] 8266 x := v.Args[1] 8267 v_2 := v.Args[2] 8268 if v_2.Op != OpAMD64FlagLT_UGT { 8269 break 8270 } 8271 v.reset(OpCopy) 8272 v.Type = x.Type 8273 v.AddArg(x) 8274 return true 8275 } 8276 return false 8277 } 8278 func rewriteValueAMD64_OpAMD64CMOVWLE_0(v *Value) bool { 8279 // match: (CMOVWLE x y (InvertFlags cond)) 8280 // cond: 8281 // result: (CMOVWGE x y cond) 8282 for { 8283 _ = v.Args[2] 8284 x := v.Args[0] 8285 y := v.Args[1] 8286 v_2 := v.Args[2] 8287 if v_2.Op != OpAMD64InvertFlags { 8288 break 8289 } 8290 cond := v_2.Args[0] 8291 v.reset(OpAMD64CMOVWGE) 8292 v.AddArg(x) 8293 v.AddArg(y) 8294 v.AddArg(cond) 8295 return true 8296 } 8297 // match: (CMOVWLE _ x (FlagEQ)) 8298 // cond: 8299 // result: x 8300 for { 8301 _ = v.Args[2] 8302 x := v.Args[1] 8303 v_2 := v.Args[2] 8304 if v_2.Op != OpAMD64FlagEQ { 8305 break 8306 } 8307 v.reset(OpCopy) 8308 v.Type = x.Type 8309 v.AddArg(x) 8310 return true 8311 } 8312 // match: (CMOVWLE y _ 
(FlagGT_UGT)) 8313 // cond: 8314 // result: y 8315 for { 8316 _ = v.Args[2] 8317 y := v.Args[0] 8318 v_2 := v.Args[2] 8319 if v_2.Op != OpAMD64FlagGT_UGT { 8320 break 8321 } 8322 v.reset(OpCopy) 8323 v.Type = y.Type 8324 v.AddArg(y) 8325 return true 8326 } 8327 // match: (CMOVWLE y _ (FlagGT_ULT)) 8328 // cond: 8329 // result: y 8330 for { 8331 _ = v.Args[2] 8332 y := v.Args[0] 8333 v_2 := v.Args[2] 8334 if v_2.Op != OpAMD64FlagGT_ULT { 8335 break 8336 } 8337 v.reset(OpCopy) 8338 v.Type = y.Type 8339 v.AddArg(y) 8340 return true 8341 } 8342 // match: (CMOVWLE _ x (FlagLT_ULT)) 8343 // cond: 8344 // result: x 8345 for { 8346 _ = v.Args[2] 8347 x := v.Args[1] 8348 v_2 := v.Args[2] 8349 if v_2.Op != OpAMD64FlagLT_ULT { 8350 break 8351 } 8352 v.reset(OpCopy) 8353 v.Type = x.Type 8354 v.AddArg(x) 8355 return true 8356 } 8357 // match: (CMOVWLE _ x (FlagLT_UGT)) 8358 // cond: 8359 // result: x 8360 for { 8361 _ = v.Args[2] 8362 x := v.Args[1] 8363 v_2 := v.Args[2] 8364 if v_2.Op != OpAMD64FlagLT_UGT { 8365 break 8366 } 8367 v.reset(OpCopy) 8368 v.Type = x.Type 8369 v.AddArg(x) 8370 return true 8371 } 8372 return false 8373 } 8374 func rewriteValueAMD64_OpAMD64CMOVWLS_0(v *Value) bool { 8375 // match: (CMOVWLS x y (InvertFlags cond)) 8376 // cond: 8377 // result: (CMOVWCC x y cond) 8378 for { 8379 _ = v.Args[2] 8380 x := v.Args[0] 8381 y := v.Args[1] 8382 v_2 := v.Args[2] 8383 if v_2.Op != OpAMD64InvertFlags { 8384 break 8385 } 8386 cond := v_2.Args[0] 8387 v.reset(OpAMD64CMOVWCC) 8388 v.AddArg(x) 8389 v.AddArg(y) 8390 v.AddArg(cond) 8391 return true 8392 } 8393 // match: (CMOVWLS _ x (FlagEQ)) 8394 // cond: 8395 // result: x 8396 for { 8397 _ = v.Args[2] 8398 x := v.Args[1] 8399 v_2 := v.Args[2] 8400 if v_2.Op != OpAMD64FlagEQ { 8401 break 8402 } 8403 v.reset(OpCopy) 8404 v.Type = x.Type 8405 v.AddArg(x) 8406 return true 8407 } 8408 // match: (CMOVWLS y _ (FlagGT_UGT)) 8409 // cond: 8410 // result: y 8411 for { 8412 _ = v.Args[2] 8413 y := v.Args[0] 8414 v_2 := v.Args[2] 8415 if v_2.Op != OpAMD64FlagGT_UGT { 8416 break 8417 } 8418 v.reset(OpCopy) 8419 v.Type = y.Type 8420 v.AddArg(y) 8421 return true 8422 } 8423 // match: (CMOVWLS _ x (FlagGT_ULT)) 8424 // cond: 8425 // result: x 8426 for { 8427 _ = v.Args[2] 8428 x := v.Args[1] 8429 v_2 := v.Args[2] 8430 if v_2.Op != OpAMD64FlagGT_ULT { 8431 break 8432 } 8433 v.reset(OpCopy) 8434 v.Type = x.Type 8435 v.AddArg(x) 8436 return true 8437 } 8438 // match: (CMOVWLS _ x (FlagLT_ULT)) 8439 // cond: 8440 // result: x 8441 for { 8442 _ = v.Args[2] 8443 x := v.Args[1] 8444 v_2 := v.Args[2] 8445 if v_2.Op != OpAMD64FlagLT_ULT { 8446 break 8447 } 8448 v.reset(OpCopy) 8449 v.Type = x.Type 8450 v.AddArg(x) 8451 return true 8452 } 8453 // match: (CMOVWLS y _ (FlagLT_UGT)) 8454 // cond: 8455 // result: y 8456 for { 8457 _ = v.Args[2] 8458 y := v.Args[0] 8459 v_2 := v.Args[2] 8460 if v_2.Op != OpAMD64FlagLT_UGT { 8461 break 8462 } 8463 v.reset(OpCopy) 8464 v.Type = y.Type 8465 v.AddArg(y) 8466 return true 8467 } 8468 return false 8469 } 8470 func rewriteValueAMD64_OpAMD64CMOVWLT_0(v *Value) bool { 8471 // match: (CMOVWLT x y (InvertFlags cond)) 8472 // cond: 8473 // result: (CMOVWGT x y cond) 8474 for { 8475 _ = v.Args[2] 8476 x := v.Args[0] 8477 y := v.Args[1] 8478 v_2 := v.Args[2] 8479 if v_2.Op != OpAMD64InvertFlags { 8480 break 8481 } 8482 cond := v_2.Args[0] 8483 v.reset(OpAMD64CMOVWGT) 8484 v.AddArg(x) 8485 v.AddArg(y) 8486 v.AddArg(cond) 8487 return true 8488 } 8489 // match: (CMOVWLT y _ (FlagEQ)) 8490 // cond: 8491 // result: y 8492 for { 8493 _ = 
v.Args[2] 8494 y := v.Args[0] 8495 v_2 := v.Args[2] 8496 if v_2.Op != OpAMD64FlagEQ { 8497 break 8498 } 8499 v.reset(OpCopy) 8500 v.Type = y.Type 8501 v.AddArg(y) 8502 return true 8503 } 8504 // match: (CMOVWLT y _ (FlagGT_UGT)) 8505 // cond: 8506 // result: y 8507 for { 8508 _ = v.Args[2] 8509 y := v.Args[0] 8510 v_2 := v.Args[2] 8511 if v_2.Op != OpAMD64FlagGT_UGT { 8512 break 8513 } 8514 v.reset(OpCopy) 8515 v.Type = y.Type 8516 v.AddArg(y) 8517 return true 8518 } 8519 // match: (CMOVWLT y _ (FlagGT_ULT)) 8520 // cond: 8521 // result: y 8522 for { 8523 _ = v.Args[2] 8524 y := v.Args[0] 8525 v_2 := v.Args[2] 8526 if v_2.Op != OpAMD64FlagGT_ULT { 8527 break 8528 } 8529 v.reset(OpCopy) 8530 v.Type = y.Type 8531 v.AddArg(y) 8532 return true 8533 } 8534 // match: (CMOVWLT _ x (FlagLT_ULT)) 8535 // cond: 8536 // result: x 8537 for { 8538 _ = v.Args[2] 8539 x := v.Args[1] 8540 v_2 := v.Args[2] 8541 if v_2.Op != OpAMD64FlagLT_ULT { 8542 break 8543 } 8544 v.reset(OpCopy) 8545 v.Type = x.Type 8546 v.AddArg(x) 8547 return true 8548 } 8549 // match: (CMOVWLT _ x (FlagLT_UGT)) 8550 // cond: 8551 // result: x 8552 for { 8553 _ = v.Args[2] 8554 x := v.Args[1] 8555 v_2 := v.Args[2] 8556 if v_2.Op != OpAMD64FlagLT_UGT { 8557 break 8558 } 8559 v.reset(OpCopy) 8560 v.Type = x.Type 8561 v.AddArg(x) 8562 return true 8563 } 8564 return false 8565 } 8566 func rewriteValueAMD64_OpAMD64CMOVWNE_0(v *Value) bool { 8567 // match: (CMOVWNE x y (InvertFlags cond)) 8568 // cond: 8569 // result: (CMOVWNE x y cond) 8570 for { 8571 _ = v.Args[2] 8572 x := v.Args[0] 8573 y := v.Args[1] 8574 v_2 := v.Args[2] 8575 if v_2.Op != OpAMD64InvertFlags { 8576 break 8577 } 8578 cond := v_2.Args[0] 8579 v.reset(OpAMD64CMOVWNE) 8580 v.AddArg(x) 8581 v.AddArg(y) 8582 v.AddArg(cond) 8583 return true 8584 } 8585 // match: (CMOVWNE y _ (FlagEQ)) 8586 // cond: 8587 // result: y 8588 for { 8589 _ = v.Args[2] 8590 y := v.Args[0] 8591 v_2 := v.Args[2] 8592 if v_2.Op != OpAMD64FlagEQ { 8593 break 8594 } 8595 v.reset(OpCopy) 8596 v.Type = y.Type 8597 v.AddArg(y) 8598 return true 8599 } 8600 // match: (CMOVWNE _ x (FlagGT_UGT)) 8601 // cond: 8602 // result: x 8603 for { 8604 _ = v.Args[2] 8605 x := v.Args[1] 8606 v_2 := v.Args[2] 8607 if v_2.Op != OpAMD64FlagGT_UGT { 8608 break 8609 } 8610 v.reset(OpCopy) 8611 v.Type = x.Type 8612 v.AddArg(x) 8613 return true 8614 } 8615 // match: (CMOVWNE _ x (FlagGT_ULT)) 8616 // cond: 8617 // result: x 8618 for { 8619 _ = v.Args[2] 8620 x := v.Args[1] 8621 v_2 := v.Args[2] 8622 if v_2.Op != OpAMD64FlagGT_ULT { 8623 break 8624 } 8625 v.reset(OpCopy) 8626 v.Type = x.Type 8627 v.AddArg(x) 8628 return true 8629 } 8630 // match: (CMOVWNE _ x (FlagLT_ULT)) 8631 // cond: 8632 // result: x 8633 for { 8634 _ = v.Args[2] 8635 x := v.Args[1] 8636 v_2 := v.Args[2] 8637 if v_2.Op != OpAMD64FlagLT_ULT { 8638 break 8639 } 8640 v.reset(OpCopy) 8641 v.Type = x.Type 8642 v.AddArg(x) 8643 return true 8644 } 8645 // match: (CMOVWNE _ x (FlagLT_UGT)) 8646 // cond: 8647 // result: x 8648 for { 8649 _ = v.Args[2] 8650 x := v.Args[1] 8651 v_2 := v.Args[2] 8652 if v_2.Op != OpAMD64FlagLT_UGT { 8653 break 8654 } 8655 v.reset(OpCopy) 8656 v.Type = x.Type 8657 v.AddArg(x) 8658 return true 8659 } 8660 return false 8661 } 8662 func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { 8663 b := v.Block 8664 _ = b 8665 // match: (CMPB x (MOVLconst [c])) 8666 // cond: 8667 // result: (CMPBconst x [int64(int8(c))]) 8668 for { 8669 _ = v.Args[1] 8670 x := v.Args[0] 8671 v_1 := v.Args[1] 8672 if v_1.Op != OpAMD64MOVLconst { 8673 break 8674 } 8675 
c := v_1.AuxInt 8676 v.reset(OpAMD64CMPBconst) 8677 v.AuxInt = int64(int8(c)) 8678 v.AddArg(x) 8679 return true 8680 } 8681 // match: (CMPB (MOVLconst [c]) x) 8682 // cond: 8683 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 8684 for { 8685 _ = v.Args[1] 8686 v_0 := v.Args[0] 8687 if v_0.Op != OpAMD64MOVLconst { 8688 break 8689 } 8690 c := v_0.AuxInt 8691 x := v.Args[1] 8692 v.reset(OpAMD64InvertFlags) 8693 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 8694 v0.AuxInt = int64(int8(c)) 8695 v0.AddArg(x) 8696 v.AddArg(v0) 8697 return true 8698 } 8699 // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x) 8700 // cond: canMergeLoad(v, l, x) && clobber(l) 8701 // result: (CMPBload {sym} [off] ptr x mem) 8702 for { 8703 _ = v.Args[1] 8704 l := v.Args[0] 8705 if l.Op != OpAMD64MOVBload { 8706 break 8707 } 8708 off := l.AuxInt 8709 sym := l.Aux 8710 _ = l.Args[1] 8711 ptr := l.Args[0] 8712 mem := l.Args[1] 8713 x := v.Args[1] 8714 if !(canMergeLoad(v, l, x) && clobber(l)) { 8715 break 8716 } 8717 v.reset(OpAMD64CMPBload) 8718 v.AuxInt = off 8719 v.Aux = sym 8720 v.AddArg(ptr) 8721 v.AddArg(x) 8722 v.AddArg(mem) 8723 return true 8724 } 8725 // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) 8726 // cond: canMergeLoad(v, l, x) && clobber(l) 8727 // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) 8728 for { 8729 _ = v.Args[1] 8730 x := v.Args[0] 8731 l := v.Args[1] 8732 if l.Op != OpAMD64MOVBload { 8733 break 8734 } 8735 off := l.AuxInt 8736 sym := l.Aux 8737 _ = l.Args[1] 8738 ptr := l.Args[0] 8739 mem := l.Args[1] 8740 if !(canMergeLoad(v, l, x) && clobber(l)) { 8741 break 8742 } 8743 v.reset(OpAMD64InvertFlags) 8744 v0 := b.NewValue0(v.Pos, OpAMD64CMPBload, types.TypeFlags) 8745 v0.AuxInt = off 8746 v0.Aux = sym 8747 v0.AddArg(ptr) 8748 v0.AddArg(x) 8749 v0.AddArg(mem) 8750 v.AddArg(v0) 8751 return true 8752 } 8753 return false 8754 } 8755 func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool { 8756 b := v.Block 8757 _ = b 8758 // match: (CMPBconst (MOVLconst [x]) [y]) 8759 // cond: int8(x)==int8(y) 8760 // result: (FlagEQ) 8761 for { 8762 y := v.AuxInt 8763 v_0 := v.Args[0] 8764 if v_0.Op != OpAMD64MOVLconst { 8765 break 8766 } 8767 x := v_0.AuxInt 8768 if !(int8(x) == int8(y)) { 8769 break 8770 } 8771 v.reset(OpAMD64FlagEQ) 8772 return true 8773 } 8774 // match: (CMPBconst (MOVLconst [x]) [y]) 8775 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 8776 // result: (FlagLT_ULT) 8777 for { 8778 y := v.AuxInt 8779 v_0 := v.Args[0] 8780 if v_0.Op != OpAMD64MOVLconst { 8781 break 8782 } 8783 x := v_0.AuxInt 8784 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 8785 break 8786 } 8787 v.reset(OpAMD64FlagLT_ULT) 8788 return true 8789 } 8790 // match: (CMPBconst (MOVLconst [x]) [y]) 8791 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 8792 // result: (FlagLT_UGT) 8793 for { 8794 y := v.AuxInt 8795 v_0 := v.Args[0] 8796 if v_0.Op != OpAMD64MOVLconst { 8797 break 8798 } 8799 x := v_0.AuxInt 8800 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 8801 break 8802 } 8803 v.reset(OpAMD64FlagLT_UGT) 8804 return true 8805 } 8806 // match: (CMPBconst (MOVLconst [x]) [y]) 8807 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 8808 // result: (FlagGT_ULT) 8809 for { 8810 y := v.AuxInt 8811 v_0 := v.Args[0] 8812 if v_0.Op != OpAMD64MOVLconst { 8813 break 8814 } 8815 x := v_0.AuxInt 8816 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 8817 break 8818 } 8819 v.reset(OpAMD64FlagGT_ULT) 8820 return true 8821 } 8822 // match: (CMPBconst (MOVLconst [x]) [y]) 8823 // cond: int8(x)>int8(y) && 
uint8(x)>uint8(y) 8824 // result: (FlagGT_UGT) 8825 for { 8826 y := v.AuxInt 8827 v_0 := v.Args[0] 8828 if v_0.Op != OpAMD64MOVLconst { 8829 break 8830 } 8831 x := v_0.AuxInt 8832 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 8833 break 8834 } 8835 v.reset(OpAMD64FlagGT_UGT) 8836 return true 8837 } 8838 // match: (CMPBconst (ANDLconst _ [m]) [n]) 8839 // cond: 0 <= int8(m) && int8(m) < int8(n) 8840 // result: (FlagLT_ULT) 8841 for { 8842 n := v.AuxInt 8843 v_0 := v.Args[0] 8844 if v_0.Op != OpAMD64ANDLconst { 8845 break 8846 } 8847 m := v_0.AuxInt 8848 if !(0 <= int8(m) && int8(m) < int8(n)) { 8849 break 8850 } 8851 v.reset(OpAMD64FlagLT_ULT) 8852 return true 8853 } 8854 // match: (CMPBconst (ANDL x y) [0]) 8855 // cond: 8856 // result: (TESTB x y) 8857 for { 8858 if v.AuxInt != 0 { 8859 break 8860 } 8861 v_0 := v.Args[0] 8862 if v_0.Op != OpAMD64ANDL { 8863 break 8864 } 8865 _ = v_0.Args[1] 8866 x := v_0.Args[0] 8867 y := v_0.Args[1] 8868 v.reset(OpAMD64TESTB) 8869 v.AddArg(x) 8870 v.AddArg(y) 8871 return true 8872 } 8873 // match: (CMPBconst (ANDLconst [c] x) [0]) 8874 // cond: 8875 // result: (TESTBconst [int64(int8(c))] x) 8876 for { 8877 if v.AuxInt != 0 { 8878 break 8879 } 8880 v_0 := v.Args[0] 8881 if v_0.Op != OpAMD64ANDLconst { 8882 break 8883 } 8884 c := v_0.AuxInt 8885 x := v_0.Args[0] 8886 v.reset(OpAMD64TESTBconst) 8887 v.AuxInt = int64(int8(c)) 8888 v.AddArg(x) 8889 return true 8890 } 8891 // match: (CMPBconst x [0]) 8892 // cond: 8893 // result: (TESTB x x) 8894 for { 8895 if v.AuxInt != 0 { 8896 break 8897 } 8898 x := v.Args[0] 8899 v.reset(OpAMD64TESTB) 8900 v.AddArg(x) 8901 v.AddArg(x) 8902 return true 8903 } 8904 // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) 8905 // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l) 8906 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(c,off)] ptr mem) 8907 for { 8908 c := v.AuxInt 8909 l := v.Args[0] 8910 if l.Op != OpAMD64MOVBload { 8911 break 8912 } 8913 off := l.AuxInt 8914 sym := l.Aux 8915 _ = l.Args[1] 8916 ptr := l.Args[0] 8917 mem := l.Args[1] 8918 if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) { 8919 break 8920 } 8921 b = l.Block 8922 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstload, types.TypeFlags) 8923 v.reset(OpCopy) 8924 v.AddArg(v0) 8925 v0.AuxInt = makeValAndOff(c, off) 8926 v0.Aux = sym 8927 v0.AddArg(ptr) 8928 v0.AddArg(mem) 8929 return true 8930 } 8931 return false 8932 } 8933 func rewriteValueAMD64_OpAMD64CMPBconstload_0(v *Value) bool { 8934 // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 8935 // cond: ValAndOff(valoff1).canAdd(off2) 8936 // result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {sym} base mem) 8937 for { 8938 valoff1 := v.AuxInt 8939 sym := v.Aux 8940 _ = v.Args[1] 8941 v_0 := v.Args[0] 8942 if v_0.Op != OpAMD64ADDQconst { 8943 break 8944 } 8945 off2 := v_0.AuxInt 8946 base := v_0.Args[0] 8947 mem := v.Args[1] 8948 if !(ValAndOff(valoff1).canAdd(off2)) { 8949 break 8950 } 8951 v.reset(OpAMD64CMPBconstload) 8952 v.AuxInt = ValAndOff(valoff1).add(off2) 8953 v.Aux = sym 8954 v.AddArg(base) 8955 v.AddArg(mem) 8956 return true 8957 } 8958 // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 8959 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 8960 // result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 8961 for { 8962 valoff1 := v.AuxInt 8963 sym1 := v.Aux 8964 _ = v.Args[1] 8965 v_0 := v.Args[0] 8966 if v_0.Op != OpAMD64LEAQ { 8967 break 8968 } 8969 off2 := v_0.AuxInt 8970 
sym2 := v_0.Aux 8971 base := v_0.Args[0] 8972 mem := v.Args[1] 8973 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 8974 break 8975 } 8976 v.reset(OpAMD64CMPBconstload) 8977 v.AuxInt = ValAndOff(valoff1).add(off2) 8978 v.Aux = mergeSym(sym1, sym2) 8979 v.AddArg(base) 8980 v.AddArg(mem) 8981 return true 8982 } 8983 return false 8984 } 8985 func rewriteValueAMD64_OpAMD64CMPBload_0(v *Value) bool { 8986 // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem) 8987 // cond: is32Bit(off1+off2) 8988 // result: (CMPBload [off1+off2] {sym} base val mem) 8989 for { 8990 off1 := v.AuxInt 8991 sym := v.Aux 8992 _ = v.Args[2] 8993 v_0 := v.Args[0] 8994 if v_0.Op != OpAMD64ADDQconst { 8995 break 8996 } 8997 off2 := v_0.AuxInt 8998 base := v_0.Args[0] 8999 val := v.Args[1] 9000 mem := v.Args[2] 9001 if !(is32Bit(off1 + off2)) { 9002 break 9003 } 9004 v.reset(OpAMD64CMPBload) 9005 v.AuxInt = off1 + off2 9006 v.Aux = sym 9007 v.AddArg(base) 9008 v.AddArg(val) 9009 v.AddArg(mem) 9010 return true 9011 } 9012 // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9013 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9014 // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9015 for { 9016 off1 := v.AuxInt 9017 sym1 := v.Aux 9018 _ = v.Args[2] 9019 v_0 := v.Args[0] 9020 if v_0.Op != OpAMD64LEAQ { 9021 break 9022 } 9023 off2 := v_0.AuxInt 9024 sym2 := v_0.Aux 9025 base := v_0.Args[0] 9026 val := v.Args[1] 9027 mem := v.Args[2] 9028 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9029 break 9030 } 9031 v.reset(OpAMD64CMPBload) 9032 v.AuxInt = off1 + off2 9033 v.Aux = mergeSym(sym1, sym2) 9034 v.AddArg(base) 9035 v.AddArg(val) 9036 v.AddArg(mem) 9037 return true 9038 } 9039 // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) 9040 // cond: validValAndOff(int64(int8(c)),off) 9041 // result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem) 9042 for { 9043 off := v.AuxInt 9044 sym := v.Aux 9045 _ = v.Args[2] 9046 ptr := v.Args[0] 9047 v_1 := v.Args[1] 9048 if v_1.Op != OpAMD64MOVLconst { 9049 break 9050 } 9051 c := v_1.AuxInt 9052 mem := v.Args[2] 9053 if !(validValAndOff(int64(int8(c)), off)) { 9054 break 9055 } 9056 v.reset(OpAMD64CMPBconstload) 9057 v.AuxInt = makeValAndOff(int64(int8(c)), off) 9058 v.Aux = sym 9059 v.AddArg(ptr) 9060 v.AddArg(mem) 9061 return true 9062 } 9063 return false 9064 } 9065 func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { 9066 b := v.Block 9067 _ = b 9068 // match: (CMPL x (MOVLconst [c])) 9069 // cond: 9070 // result: (CMPLconst x [c]) 9071 for { 9072 _ = v.Args[1] 9073 x := v.Args[0] 9074 v_1 := v.Args[1] 9075 if v_1.Op != OpAMD64MOVLconst { 9076 break 9077 } 9078 c := v_1.AuxInt 9079 v.reset(OpAMD64CMPLconst) 9080 v.AuxInt = c 9081 v.AddArg(x) 9082 return true 9083 } 9084 // match: (CMPL (MOVLconst [c]) x) 9085 // cond: 9086 // result: (InvertFlags (CMPLconst x [c])) 9087 for { 9088 _ = v.Args[1] 9089 v_0 := v.Args[0] 9090 if v_0.Op != OpAMD64MOVLconst { 9091 break 9092 } 9093 c := v_0.AuxInt 9094 x := v.Args[1] 9095 v.reset(OpAMD64InvertFlags) 9096 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 9097 v0.AuxInt = c 9098 v0.AddArg(x) 9099 v.AddArg(v0) 9100 return true 9101 } 9102 // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x) 9103 // cond: canMergeLoad(v, l, x) && clobber(l) 9104 // result: (CMPLload {sym} [off] ptr x mem) 9105 for { 9106 _ = v.Args[1] 9107 l := v.Args[0] 9108 if l.Op != OpAMD64MOVLload { 9109 break 9110 } 9111 off := l.AuxInt 9112 sym := l.Aux 9113 _ = 
l.Args[1] 9114 ptr := l.Args[0] 9115 mem := l.Args[1] 9116 x := v.Args[1] 9117 if !(canMergeLoad(v, l, x) && clobber(l)) { 9118 break 9119 } 9120 v.reset(OpAMD64CMPLload) 9121 v.AuxInt = off 9122 v.Aux = sym 9123 v.AddArg(ptr) 9124 v.AddArg(x) 9125 v.AddArg(mem) 9126 return true 9127 } 9128 // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) 9129 // cond: canMergeLoad(v, l, x) && clobber(l) 9130 // result: (InvertFlags (CMPLload {sym} [off] ptr x mem)) 9131 for { 9132 _ = v.Args[1] 9133 x := v.Args[0] 9134 l := v.Args[1] 9135 if l.Op != OpAMD64MOVLload { 9136 break 9137 } 9138 off := l.AuxInt 9139 sym := l.Aux 9140 _ = l.Args[1] 9141 ptr := l.Args[0] 9142 mem := l.Args[1] 9143 if !(canMergeLoad(v, l, x) && clobber(l)) { 9144 break 9145 } 9146 v.reset(OpAMD64InvertFlags) 9147 v0 := b.NewValue0(v.Pos, OpAMD64CMPLload, types.TypeFlags) 9148 v0.AuxInt = off 9149 v0.Aux = sym 9150 v0.AddArg(ptr) 9151 v0.AddArg(x) 9152 v0.AddArg(mem) 9153 v.AddArg(v0) 9154 return true 9155 } 9156 return false 9157 } 9158 func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool { 9159 // match: (CMPLconst (MOVLconst [x]) [y]) 9160 // cond: int32(x)==int32(y) 9161 // result: (FlagEQ) 9162 for { 9163 y := v.AuxInt 9164 v_0 := v.Args[0] 9165 if v_0.Op != OpAMD64MOVLconst { 9166 break 9167 } 9168 x := v_0.AuxInt 9169 if !(int32(x) == int32(y)) { 9170 break 9171 } 9172 v.reset(OpAMD64FlagEQ) 9173 return true 9174 } 9175 // match: (CMPLconst (MOVLconst [x]) [y]) 9176 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 9177 // result: (FlagLT_ULT) 9178 for { 9179 y := v.AuxInt 9180 v_0 := v.Args[0] 9181 if v_0.Op != OpAMD64MOVLconst { 9182 break 9183 } 9184 x := v_0.AuxInt 9185 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 9186 break 9187 } 9188 v.reset(OpAMD64FlagLT_ULT) 9189 return true 9190 } 9191 // match: (CMPLconst (MOVLconst [x]) [y]) 9192 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 9193 // result: (FlagLT_UGT) 9194 for { 9195 y := v.AuxInt 9196 v_0 := v.Args[0] 9197 if v_0.Op != OpAMD64MOVLconst { 9198 break 9199 } 9200 x := v_0.AuxInt 9201 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 9202 break 9203 } 9204 v.reset(OpAMD64FlagLT_UGT) 9205 return true 9206 } 9207 // match: (CMPLconst (MOVLconst [x]) [y]) 9208 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 9209 // result: (FlagGT_ULT) 9210 for { 9211 y := v.AuxInt 9212 v_0 := v.Args[0] 9213 if v_0.Op != OpAMD64MOVLconst { 9214 break 9215 } 9216 x := v_0.AuxInt 9217 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 9218 break 9219 } 9220 v.reset(OpAMD64FlagGT_ULT) 9221 return true 9222 } 9223 // match: (CMPLconst (MOVLconst [x]) [y]) 9224 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 9225 // result: (FlagGT_UGT) 9226 for { 9227 y := v.AuxInt 9228 v_0 := v.Args[0] 9229 if v_0.Op != OpAMD64MOVLconst { 9230 break 9231 } 9232 x := v_0.AuxInt 9233 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 9234 break 9235 } 9236 v.reset(OpAMD64FlagGT_UGT) 9237 return true 9238 } 9239 // match: (CMPLconst (SHRLconst _ [c]) [n]) 9240 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 9241 // result: (FlagLT_ULT) 9242 for { 9243 n := v.AuxInt 9244 v_0 := v.Args[0] 9245 if v_0.Op != OpAMD64SHRLconst { 9246 break 9247 } 9248 c := v_0.AuxInt 9249 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 9250 break 9251 } 9252 v.reset(OpAMD64FlagLT_ULT) 9253 return true 9254 } 9255 // match: (CMPLconst (ANDLconst _ [m]) [n]) 9256 // cond: 0 <= int32(m) && int32(m) < int32(n) 9257 // result: (FlagLT_ULT) 9258 for { 9259 n := 
v.AuxInt 9260 v_0 := v.Args[0] 9261 if v_0.Op != OpAMD64ANDLconst { 9262 break 9263 } 9264 m := v_0.AuxInt 9265 if !(0 <= int32(m) && int32(m) < int32(n)) { 9266 break 9267 } 9268 v.reset(OpAMD64FlagLT_ULT) 9269 return true 9270 } 9271 // match: (CMPLconst (ANDL x y) [0]) 9272 // cond: 9273 // result: (TESTL x y) 9274 for { 9275 if v.AuxInt != 0 { 9276 break 9277 } 9278 v_0 := v.Args[0] 9279 if v_0.Op != OpAMD64ANDL { 9280 break 9281 } 9282 _ = v_0.Args[1] 9283 x := v_0.Args[0] 9284 y := v_0.Args[1] 9285 v.reset(OpAMD64TESTL) 9286 v.AddArg(x) 9287 v.AddArg(y) 9288 return true 9289 } 9290 // match: (CMPLconst (ANDLconst [c] x) [0]) 9291 // cond: 9292 // result: (TESTLconst [c] x) 9293 for { 9294 if v.AuxInt != 0 { 9295 break 9296 } 9297 v_0 := v.Args[0] 9298 if v_0.Op != OpAMD64ANDLconst { 9299 break 9300 } 9301 c := v_0.AuxInt 9302 x := v_0.Args[0] 9303 v.reset(OpAMD64TESTLconst) 9304 v.AuxInt = c 9305 v.AddArg(x) 9306 return true 9307 } 9308 // match: (CMPLconst x [0]) 9309 // cond: 9310 // result: (TESTL x x) 9311 for { 9312 if v.AuxInt != 0 { 9313 break 9314 } 9315 x := v.Args[0] 9316 v.reset(OpAMD64TESTL) 9317 v.AddArg(x) 9318 v.AddArg(x) 9319 return true 9320 } 9321 return false 9322 } 9323 func rewriteValueAMD64_OpAMD64CMPLconst_10(v *Value) bool { 9324 b := v.Block 9325 _ = b 9326 // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) 9327 // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l) 9328 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) 9329 for { 9330 c := v.AuxInt 9331 l := v.Args[0] 9332 if l.Op != OpAMD64MOVLload { 9333 break 9334 } 9335 off := l.AuxInt 9336 sym := l.Aux 9337 _ = l.Args[1] 9338 ptr := l.Args[0] 9339 mem := l.Args[1] 9340 if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) { 9341 break 9342 } 9343 b = l.Block 9344 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstload, types.TypeFlags) 9345 v.reset(OpCopy) 9346 v.AddArg(v0) 9347 v0.AuxInt = makeValAndOff(c, off) 9348 v0.Aux = sym 9349 v0.AddArg(ptr) 9350 v0.AddArg(mem) 9351 return true 9352 } 9353 return false 9354 } 9355 func rewriteValueAMD64_OpAMD64CMPLconstload_0(v *Value) bool { 9356 // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 9357 // cond: ValAndOff(valoff1).canAdd(off2) 9358 // result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {sym} base mem) 9359 for { 9360 valoff1 := v.AuxInt 9361 sym := v.Aux 9362 _ = v.Args[1] 9363 v_0 := v.Args[0] 9364 if v_0.Op != OpAMD64ADDQconst { 9365 break 9366 } 9367 off2 := v_0.AuxInt 9368 base := v_0.Args[0] 9369 mem := v.Args[1] 9370 if !(ValAndOff(valoff1).canAdd(off2)) { 9371 break 9372 } 9373 v.reset(OpAMD64CMPLconstload) 9374 v.AuxInt = ValAndOff(valoff1).add(off2) 9375 v.Aux = sym 9376 v.AddArg(base) 9377 v.AddArg(mem) 9378 return true 9379 } 9380 // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 9381 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 9382 // result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 9383 for { 9384 valoff1 := v.AuxInt 9385 sym1 := v.Aux 9386 _ = v.Args[1] 9387 v_0 := v.Args[0] 9388 if v_0.Op != OpAMD64LEAQ { 9389 break 9390 } 9391 off2 := v_0.AuxInt 9392 sym2 := v_0.Aux 9393 base := v_0.Args[0] 9394 mem := v.Args[1] 9395 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 9396 break 9397 } 9398 v.reset(OpAMD64CMPLconstload) 9399 v.AuxInt = ValAndOff(valoff1).add(off2) 9400 v.Aux = mergeSym(sym1, sym2) 9401 v.AddArg(base) 9402 v.AddArg(mem) 9403 return true 9404 } 9405 return false 9406 
} 9407 func rewriteValueAMD64_OpAMD64CMPLload_0(v *Value) bool { 9408 // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem) 9409 // cond: is32Bit(off1+off2) 9410 // result: (CMPLload [off1+off2] {sym} base val mem) 9411 for { 9412 off1 := v.AuxInt 9413 sym := v.Aux 9414 _ = v.Args[2] 9415 v_0 := v.Args[0] 9416 if v_0.Op != OpAMD64ADDQconst { 9417 break 9418 } 9419 off2 := v_0.AuxInt 9420 base := v_0.Args[0] 9421 val := v.Args[1] 9422 mem := v.Args[2] 9423 if !(is32Bit(off1 + off2)) { 9424 break 9425 } 9426 v.reset(OpAMD64CMPLload) 9427 v.AuxInt = off1 + off2 9428 v.Aux = sym 9429 v.AddArg(base) 9430 v.AddArg(val) 9431 v.AddArg(mem) 9432 return true 9433 } 9434 // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9435 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9436 // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9437 for { 9438 off1 := v.AuxInt 9439 sym1 := v.Aux 9440 _ = v.Args[2] 9441 v_0 := v.Args[0] 9442 if v_0.Op != OpAMD64LEAQ { 9443 break 9444 } 9445 off2 := v_0.AuxInt 9446 sym2 := v_0.Aux 9447 base := v_0.Args[0] 9448 val := v.Args[1] 9449 mem := v.Args[2] 9450 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9451 break 9452 } 9453 v.reset(OpAMD64CMPLload) 9454 v.AuxInt = off1 + off2 9455 v.Aux = mergeSym(sym1, sym2) 9456 v.AddArg(base) 9457 v.AddArg(val) 9458 v.AddArg(mem) 9459 return true 9460 } 9461 // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) 9462 // cond: validValAndOff(c,off) 9463 // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) 9464 for { 9465 off := v.AuxInt 9466 sym := v.Aux 9467 _ = v.Args[2] 9468 ptr := v.Args[0] 9469 v_1 := v.Args[1] 9470 if v_1.Op != OpAMD64MOVLconst { 9471 break 9472 } 9473 c := v_1.AuxInt 9474 mem := v.Args[2] 9475 if !(validValAndOff(c, off)) { 9476 break 9477 } 9478 v.reset(OpAMD64CMPLconstload) 9479 v.AuxInt = makeValAndOff(c, off) 9480 v.Aux = sym 9481 v.AddArg(ptr) 9482 v.AddArg(mem) 9483 return true 9484 } 9485 return false 9486 } 9487 func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { 9488 b := v.Block 9489 _ = b 9490 // match: (CMPQ x (MOVQconst [c])) 9491 // cond: is32Bit(c) 9492 // result: (CMPQconst x [c]) 9493 for { 9494 _ = v.Args[1] 9495 x := v.Args[0] 9496 v_1 := v.Args[1] 9497 if v_1.Op != OpAMD64MOVQconst { 9498 break 9499 } 9500 c := v_1.AuxInt 9501 if !(is32Bit(c)) { 9502 break 9503 } 9504 v.reset(OpAMD64CMPQconst) 9505 v.AuxInt = c 9506 v.AddArg(x) 9507 return true 9508 } 9509 // match: (CMPQ (MOVQconst [c]) x) 9510 // cond: is32Bit(c) 9511 // result: (InvertFlags (CMPQconst x [c])) 9512 for { 9513 _ = v.Args[1] 9514 v_0 := v.Args[0] 9515 if v_0.Op != OpAMD64MOVQconst { 9516 break 9517 } 9518 c := v_0.AuxInt 9519 x := v.Args[1] 9520 if !(is32Bit(c)) { 9521 break 9522 } 9523 v.reset(OpAMD64InvertFlags) 9524 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 9525 v0.AuxInt = c 9526 v0.AddArg(x) 9527 v.AddArg(v0) 9528 return true 9529 } 9530 // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x) 9531 // cond: canMergeLoad(v, l, x) && clobber(l) 9532 // result: (CMPQload {sym} [off] ptr x mem) 9533 for { 9534 _ = v.Args[1] 9535 l := v.Args[0] 9536 if l.Op != OpAMD64MOVQload { 9537 break 9538 } 9539 off := l.AuxInt 9540 sym := l.Aux 9541 _ = l.Args[1] 9542 ptr := l.Args[0] 9543 mem := l.Args[1] 9544 x := v.Args[1] 9545 if !(canMergeLoad(v, l, x) && clobber(l)) { 9546 break 9547 } 9548 v.reset(OpAMD64CMPQload) 9549 v.AuxInt = off 9550 v.Aux = sym 9551 v.AddArg(ptr) 9552 v.AddArg(x) 9553 v.AddArg(mem) 9554 return true 9555 } 9556 // 
match: (CMPQ x l:(MOVQload {sym} [off] ptr mem)) 9557 // cond: canMergeLoad(v, l, x) && clobber(l) 9558 // result: (InvertFlags (CMPQload {sym} [off] ptr x mem)) 9559 for { 9560 _ = v.Args[1] 9561 x := v.Args[0] 9562 l := v.Args[1] 9563 if l.Op != OpAMD64MOVQload { 9564 break 9565 } 9566 off := l.AuxInt 9567 sym := l.Aux 9568 _ = l.Args[1] 9569 ptr := l.Args[0] 9570 mem := l.Args[1] 9571 if !(canMergeLoad(v, l, x) && clobber(l)) { 9572 break 9573 } 9574 v.reset(OpAMD64InvertFlags) 9575 v0 := b.NewValue0(v.Pos, OpAMD64CMPQload, types.TypeFlags) 9576 v0.AuxInt = off 9577 v0.Aux = sym 9578 v0.AddArg(ptr) 9579 v0.AddArg(x) 9580 v0.AddArg(mem) 9581 v.AddArg(v0) 9582 return true 9583 } 9584 return false 9585 } 9586 func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool { 9587 // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) 9588 // cond: 9589 // result: (FlagLT_ULT) 9590 for { 9591 if v.AuxInt != 32 { 9592 break 9593 } 9594 v_0 := v.Args[0] 9595 if v_0.Op != OpAMD64NEGQ { 9596 break 9597 } 9598 v_0_0 := v_0.Args[0] 9599 if v_0_0.Op != OpAMD64ADDQconst { 9600 break 9601 } 9602 if v_0_0.AuxInt != -16 { 9603 break 9604 } 9605 v_0_0_0 := v_0_0.Args[0] 9606 if v_0_0_0.Op != OpAMD64ANDQconst { 9607 break 9608 } 9609 if v_0_0_0.AuxInt != 15 { 9610 break 9611 } 9612 v.reset(OpAMD64FlagLT_ULT) 9613 return true 9614 } 9615 // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) 9616 // cond: 9617 // result: (FlagLT_ULT) 9618 for { 9619 if v.AuxInt != 32 { 9620 break 9621 } 9622 v_0 := v.Args[0] 9623 if v_0.Op != OpAMD64NEGQ { 9624 break 9625 } 9626 v_0_0 := v_0.Args[0] 9627 if v_0_0.Op != OpAMD64ADDQconst { 9628 break 9629 } 9630 if v_0_0.AuxInt != -8 { 9631 break 9632 } 9633 v_0_0_0 := v_0_0.Args[0] 9634 if v_0_0_0.Op != OpAMD64ANDQconst { 9635 break 9636 } 9637 if v_0_0_0.AuxInt != 7 { 9638 break 9639 } 9640 v.reset(OpAMD64FlagLT_ULT) 9641 return true 9642 } 9643 // match: (CMPQconst (MOVQconst [x]) [y]) 9644 // cond: x==y 9645 // result: (FlagEQ) 9646 for { 9647 y := v.AuxInt 9648 v_0 := v.Args[0] 9649 if v_0.Op != OpAMD64MOVQconst { 9650 break 9651 } 9652 x := v_0.AuxInt 9653 if !(x == y) { 9654 break 9655 } 9656 v.reset(OpAMD64FlagEQ) 9657 return true 9658 } 9659 // match: (CMPQconst (MOVQconst [x]) [y]) 9660 // cond: x<y && uint64(x)<uint64(y) 9661 // result: (FlagLT_ULT) 9662 for { 9663 y := v.AuxInt 9664 v_0 := v.Args[0] 9665 if v_0.Op != OpAMD64MOVQconst { 9666 break 9667 } 9668 x := v_0.AuxInt 9669 if !(x < y && uint64(x) < uint64(y)) { 9670 break 9671 } 9672 v.reset(OpAMD64FlagLT_ULT) 9673 return true 9674 } 9675 // match: (CMPQconst (MOVQconst [x]) [y]) 9676 // cond: x<y && uint64(x)>uint64(y) 9677 // result: (FlagLT_UGT) 9678 for { 9679 y := v.AuxInt 9680 v_0 := v.Args[0] 9681 if v_0.Op != OpAMD64MOVQconst { 9682 break 9683 } 9684 x := v_0.AuxInt 9685 if !(x < y && uint64(x) > uint64(y)) { 9686 break 9687 } 9688 v.reset(OpAMD64FlagLT_UGT) 9689 return true 9690 } 9691 // match: (CMPQconst (MOVQconst [x]) [y]) 9692 // cond: x>y && uint64(x)<uint64(y) 9693 // result: (FlagGT_ULT) 9694 for { 9695 y := v.AuxInt 9696 v_0 := v.Args[0] 9697 if v_0.Op != OpAMD64MOVQconst { 9698 break 9699 } 9700 x := v_0.AuxInt 9701 if !(x > y && uint64(x) < uint64(y)) { 9702 break 9703 } 9704 v.reset(OpAMD64FlagGT_ULT) 9705 return true 9706 } 9707 // match: (CMPQconst (MOVQconst [x]) [y]) 9708 // cond: x>y && uint64(x)>uint64(y) 9709 // result: (FlagGT_UGT) 9710 for { 9711 y := v.AuxInt 9712 v_0 := v.Args[0] 9713 if v_0.Op != OpAMD64MOVQconst { 9714 break 9715 } 9716 x := 
v_0.AuxInt 9717 if !(x > y && uint64(x) > uint64(y)) { 9718 break 9719 } 9720 v.reset(OpAMD64FlagGT_UGT) 9721 return true 9722 } 9723 // match: (CMPQconst (MOVBQZX _) [c]) 9724 // cond: 0xFF < c 9725 // result: (FlagLT_ULT) 9726 for { 9727 c := v.AuxInt 9728 v_0 := v.Args[0] 9729 if v_0.Op != OpAMD64MOVBQZX { 9730 break 9731 } 9732 if !(0xFF < c) { 9733 break 9734 } 9735 v.reset(OpAMD64FlagLT_ULT) 9736 return true 9737 } 9738 // match: (CMPQconst (MOVWQZX _) [c]) 9739 // cond: 0xFFFF < c 9740 // result: (FlagLT_ULT) 9741 for { 9742 c := v.AuxInt 9743 v_0 := v.Args[0] 9744 if v_0.Op != OpAMD64MOVWQZX { 9745 break 9746 } 9747 if !(0xFFFF < c) { 9748 break 9749 } 9750 v.reset(OpAMD64FlagLT_ULT) 9751 return true 9752 } 9753 // match: (CMPQconst (MOVLQZX _) [c]) 9754 // cond: 0xFFFFFFFF < c 9755 // result: (FlagLT_ULT) 9756 for { 9757 c := v.AuxInt 9758 v_0 := v.Args[0] 9759 if v_0.Op != OpAMD64MOVLQZX { 9760 break 9761 } 9762 if !(0xFFFFFFFF < c) { 9763 break 9764 } 9765 v.reset(OpAMD64FlagLT_ULT) 9766 return true 9767 } 9768 return false 9769 } 9770 func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool { 9771 b := v.Block 9772 _ = b 9773 // match: (CMPQconst (SHRQconst _ [c]) [n]) 9774 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 9775 // result: (FlagLT_ULT) 9776 for { 9777 n := v.AuxInt 9778 v_0 := v.Args[0] 9779 if v_0.Op != OpAMD64SHRQconst { 9780 break 9781 } 9782 c := v_0.AuxInt 9783 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 9784 break 9785 } 9786 v.reset(OpAMD64FlagLT_ULT) 9787 return true 9788 } 9789 // match: (CMPQconst (ANDQconst _ [m]) [n]) 9790 // cond: 0 <= m && m < n 9791 // result: (FlagLT_ULT) 9792 for { 9793 n := v.AuxInt 9794 v_0 := v.Args[0] 9795 if v_0.Op != OpAMD64ANDQconst { 9796 break 9797 } 9798 m := v_0.AuxInt 9799 if !(0 <= m && m < n) { 9800 break 9801 } 9802 v.reset(OpAMD64FlagLT_ULT) 9803 return true 9804 } 9805 // match: (CMPQconst (ANDLconst _ [m]) [n]) 9806 // cond: 0 <= m && m < n 9807 // result: (FlagLT_ULT) 9808 for { 9809 n := v.AuxInt 9810 v_0 := v.Args[0] 9811 if v_0.Op != OpAMD64ANDLconst { 9812 break 9813 } 9814 m := v_0.AuxInt 9815 if !(0 <= m && m < n) { 9816 break 9817 } 9818 v.reset(OpAMD64FlagLT_ULT) 9819 return true 9820 } 9821 // match: (CMPQconst (ANDQ x y) [0]) 9822 // cond: 9823 // result: (TESTQ x y) 9824 for { 9825 if v.AuxInt != 0 { 9826 break 9827 } 9828 v_0 := v.Args[0] 9829 if v_0.Op != OpAMD64ANDQ { 9830 break 9831 } 9832 _ = v_0.Args[1] 9833 x := v_0.Args[0] 9834 y := v_0.Args[1] 9835 v.reset(OpAMD64TESTQ) 9836 v.AddArg(x) 9837 v.AddArg(y) 9838 return true 9839 } 9840 // match: (CMPQconst (ANDQconst [c] x) [0]) 9841 // cond: 9842 // result: (TESTQconst [c] x) 9843 for { 9844 if v.AuxInt != 0 { 9845 break 9846 } 9847 v_0 := v.Args[0] 9848 if v_0.Op != OpAMD64ANDQconst { 9849 break 9850 } 9851 c := v_0.AuxInt 9852 x := v_0.Args[0] 9853 v.reset(OpAMD64TESTQconst) 9854 v.AuxInt = c 9855 v.AddArg(x) 9856 return true 9857 } 9858 // match: (CMPQconst x [0]) 9859 // cond: 9860 // result: (TESTQ x x) 9861 for { 9862 if v.AuxInt != 0 { 9863 break 9864 } 9865 x := v.Args[0] 9866 v.reset(OpAMD64TESTQ) 9867 v.AddArg(x) 9868 v.AddArg(x) 9869 return true 9870 } 9871 // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c]) 9872 // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l) 9873 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem) 9874 for { 9875 c := v.AuxInt 9876 l := v.Args[0] 9877 if l.Op != OpAMD64MOVQload { 9878 break 9879 } 9880 off := l.AuxInt 9881 sym 
:= l.Aux 9882 _ = l.Args[1] 9883 ptr := l.Args[0] 9884 mem := l.Args[1] 9885 if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) { 9886 break 9887 } 9888 b = l.Block 9889 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstload, types.TypeFlags) 9890 v.reset(OpCopy) 9891 v.AddArg(v0) 9892 v0.AuxInt = makeValAndOff(c, off) 9893 v0.Aux = sym 9894 v0.AddArg(ptr) 9895 v0.AddArg(mem) 9896 return true 9897 } 9898 return false 9899 } 9900 func rewriteValueAMD64_OpAMD64CMPQconstload_0(v *Value) bool { 9901 // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 9902 // cond: ValAndOff(valoff1).canAdd(off2) 9903 // result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {sym} base mem) 9904 for { 9905 valoff1 := v.AuxInt 9906 sym := v.Aux 9907 _ = v.Args[1] 9908 v_0 := v.Args[0] 9909 if v_0.Op != OpAMD64ADDQconst { 9910 break 9911 } 9912 off2 := v_0.AuxInt 9913 base := v_0.Args[0] 9914 mem := v.Args[1] 9915 if !(ValAndOff(valoff1).canAdd(off2)) { 9916 break 9917 } 9918 v.reset(OpAMD64CMPQconstload) 9919 v.AuxInt = ValAndOff(valoff1).add(off2) 9920 v.Aux = sym 9921 v.AddArg(base) 9922 v.AddArg(mem) 9923 return true 9924 } 9925 // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 9926 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 9927 // result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 9928 for { 9929 valoff1 := v.AuxInt 9930 sym1 := v.Aux 9931 _ = v.Args[1] 9932 v_0 := v.Args[0] 9933 if v_0.Op != OpAMD64LEAQ { 9934 break 9935 } 9936 off2 := v_0.AuxInt 9937 sym2 := v_0.Aux 9938 base := v_0.Args[0] 9939 mem := v.Args[1] 9940 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 9941 break 9942 } 9943 v.reset(OpAMD64CMPQconstload) 9944 v.AuxInt = ValAndOff(valoff1).add(off2) 9945 v.Aux = mergeSym(sym1, sym2) 9946 v.AddArg(base) 9947 v.AddArg(mem) 9948 return true 9949 } 9950 return false 9951 } 9952 func rewriteValueAMD64_OpAMD64CMPQload_0(v *Value) bool { 9953 // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem) 9954 // cond: is32Bit(off1+off2) 9955 // result: (CMPQload [off1+off2] {sym} base val mem) 9956 for { 9957 off1 := v.AuxInt 9958 sym := v.Aux 9959 _ = v.Args[2] 9960 v_0 := v.Args[0] 9961 if v_0.Op != OpAMD64ADDQconst { 9962 break 9963 } 9964 off2 := v_0.AuxInt 9965 base := v_0.Args[0] 9966 val := v.Args[1] 9967 mem := v.Args[2] 9968 if !(is32Bit(off1 + off2)) { 9969 break 9970 } 9971 v.reset(OpAMD64CMPQload) 9972 v.AuxInt = off1 + off2 9973 v.Aux = sym 9974 v.AddArg(base) 9975 v.AddArg(val) 9976 v.AddArg(mem) 9977 return true 9978 } 9979 // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9980 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9981 // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9982 for { 9983 off1 := v.AuxInt 9984 sym1 := v.Aux 9985 _ = v.Args[2] 9986 v_0 := v.Args[0] 9987 if v_0.Op != OpAMD64LEAQ { 9988 break 9989 } 9990 off2 := v_0.AuxInt 9991 sym2 := v_0.Aux 9992 base := v_0.Args[0] 9993 val := v.Args[1] 9994 mem := v.Args[2] 9995 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9996 break 9997 } 9998 v.reset(OpAMD64CMPQload) 9999 v.AuxInt = off1 + off2 10000 v.Aux = mergeSym(sym1, sym2) 10001 v.AddArg(base) 10002 v.AddArg(val) 10003 v.AddArg(mem) 10004 return true 10005 } 10006 // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) 10007 // cond: validValAndOff(c,off) 10008 // result: (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem) 10009 for { 10010 off := v.AuxInt 10011 sym := v.Aux 10012 _ = v.Args[2] 
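// (In the CMPQload-of-constant rule being matched here, makeValAndOff(c, off)
// packs the compared value and the load offset into the single AuxInt field of
// CMPQconstload, value in the high 32 bits and offset in the low 32 bits; that
// is why the rule is guarded by validValAndOff(c, off), which requires both
// halves to fit in 32 bits.)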
10013 ptr := v.Args[0] 10014 v_1 := v.Args[1] 10015 if v_1.Op != OpAMD64MOVQconst { 10016 break 10017 } 10018 c := v_1.AuxInt 10019 mem := v.Args[2] 10020 if !(validValAndOff(c, off)) { 10021 break 10022 } 10023 v.reset(OpAMD64CMPQconstload) 10024 v.AuxInt = makeValAndOff(c, off) 10025 v.Aux = sym 10026 v.AddArg(ptr) 10027 v.AddArg(mem) 10028 return true 10029 } 10030 return false 10031 } 10032 func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { 10033 b := v.Block 10034 _ = b 10035 // match: (CMPW x (MOVLconst [c])) 10036 // cond: 10037 // result: (CMPWconst x [int64(int16(c))]) 10038 for { 10039 _ = v.Args[1] 10040 x := v.Args[0] 10041 v_1 := v.Args[1] 10042 if v_1.Op != OpAMD64MOVLconst { 10043 break 10044 } 10045 c := v_1.AuxInt 10046 v.reset(OpAMD64CMPWconst) 10047 v.AuxInt = int64(int16(c)) 10048 v.AddArg(x) 10049 return true 10050 } 10051 // match: (CMPW (MOVLconst [c]) x) 10052 // cond: 10053 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 10054 for { 10055 _ = v.Args[1] 10056 v_0 := v.Args[0] 10057 if v_0.Op != OpAMD64MOVLconst { 10058 break 10059 } 10060 c := v_0.AuxInt 10061 x := v.Args[1] 10062 v.reset(OpAMD64InvertFlags) 10063 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 10064 v0.AuxInt = int64(int16(c)) 10065 v0.AddArg(x) 10066 v.AddArg(v0) 10067 return true 10068 } 10069 // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x) 10070 // cond: canMergeLoad(v, l, x) && clobber(l) 10071 // result: (CMPWload {sym} [off] ptr x mem) 10072 for { 10073 _ = v.Args[1] 10074 l := v.Args[0] 10075 if l.Op != OpAMD64MOVWload { 10076 break 10077 } 10078 off := l.AuxInt 10079 sym := l.Aux 10080 _ = l.Args[1] 10081 ptr := l.Args[0] 10082 mem := l.Args[1] 10083 x := v.Args[1] 10084 if !(canMergeLoad(v, l, x) && clobber(l)) { 10085 break 10086 } 10087 v.reset(OpAMD64CMPWload) 10088 v.AuxInt = off 10089 v.Aux = sym 10090 v.AddArg(ptr) 10091 v.AddArg(x) 10092 v.AddArg(mem) 10093 return true 10094 } 10095 // match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) 10096 // cond: canMergeLoad(v, l, x) && clobber(l) 10097 // result: (InvertFlags (CMPWload {sym} [off] ptr x mem)) 10098 for { 10099 _ = v.Args[1] 10100 x := v.Args[0] 10101 l := v.Args[1] 10102 if l.Op != OpAMD64MOVWload { 10103 break 10104 } 10105 off := l.AuxInt 10106 sym := l.Aux 10107 _ = l.Args[1] 10108 ptr := l.Args[0] 10109 mem := l.Args[1] 10110 if !(canMergeLoad(v, l, x) && clobber(l)) { 10111 break 10112 } 10113 v.reset(OpAMD64InvertFlags) 10114 v0 := b.NewValue0(v.Pos, OpAMD64CMPWload, types.TypeFlags) 10115 v0.AuxInt = off 10116 v0.Aux = sym 10117 v0.AddArg(ptr) 10118 v0.AddArg(x) 10119 v0.AddArg(mem) 10120 v.AddArg(v0) 10121 return true 10122 } 10123 return false 10124 } 10125 func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool { 10126 b := v.Block 10127 _ = b 10128 // match: (CMPWconst (MOVLconst [x]) [y]) 10129 // cond: int16(x)==int16(y) 10130 // result: (FlagEQ) 10131 for { 10132 y := v.AuxInt 10133 v_0 := v.Args[0] 10134 if v_0.Op != OpAMD64MOVLconst { 10135 break 10136 } 10137 x := v_0.AuxInt 10138 if !(int16(x) == int16(y)) { 10139 break 10140 } 10141 v.reset(OpAMD64FlagEQ) 10142 return true 10143 } 10144 // match: (CMPWconst (MOVLconst [x]) [y]) 10145 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 10146 // result: (FlagLT_ULT) 10147 for { 10148 y := v.AuxInt 10149 v_0 := v.Args[0] 10150 if v_0.Op != OpAMD64MOVLconst { 10151 break 10152 } 10153 x := v_0.AuxInt 10154 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 10155 break 10156 } 10157 v.reset(OpAMD64FlagLT_ULT) 10158 return true 10159 } 
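// The five (CMPWconst (MOVLconst [x]) [y]) cases above and below fold a
// compare of two known constants straight into a flag pseudo-op that records
// the signed and the unsigned 16-bit ordering at once. A minimal sketch of
// that case split, as a hypothetical helper (flagsForConst16 is illustrative
// only and not part of this generated file):
//
//	func flagsForConst16(x, y int64) Op {
//		switch {
//		case int16(x) == int16(y):
//			return OpAMD64FlagEQ
//		case int16(x) < int16(y) && uint16(x) < uint16(y):
//			return OpAMD64FlagLT_ULT
//		case int16(x) < int16(y) && uint16(x) > uint16(y):
//			return OpAMD64FlagLT_UGT
//		case int16(x) > int16(y) && uint16(x) < uint16(y):
//			return OpAMD64FlagGT_ULT
//		default:
//			// Equal low 16 bits would mean equal int16s, so the only
//			// remaining case is int16(x) > int16(y) && uint16(x) > uint16(y).
//			return OpAMD64FlagGT_UGT
//		}
//	}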
func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := v.AuxInt
		l := v.Args[0]
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(c, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
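// When both sides of a 16-bit compare are known constants, the compare
// collapses to one of the Flag* pseudo-ops, which record the signed and the
// unsigned orderings separately. The split matters because truncation can
// make the two views disagree; for example, with x = 0xFFFF and y = 1:
//
//	int16(0xFFFF) == -1    // signed:   -1 < 1,     less than
//	uint16(0xFFFF) == 65535 // unsigned: 65535 > 1,  greater than
//
// which is exactly the FlagLT_UGT case above.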
func rewriteValueAMD64_OpAMD64CMPWconstload_0(v *Value) bool {
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWload_0(v *Value) bool {
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// cond: validValAndOff(int64(int16(c)),off)
	// result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(int64(int16(c)), off)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
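// Note the int64(int16(c)) in the last CMPWload rule: a MOVLconst carries a
// 32-bit constant, but only its low 16 bits take part in a word compare, so
// the value is truncated and re-sign-extended before being packed next to
// the offset. A one-line illustration of the normalization:
//
//	int64(int16(0x12345)) == 0x2345 // high bits dropped before packing
//
// Without it, validValAndOff could reject constants whose upper bits are
// irrelevant to the 16-bit comparison.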
func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSD_0(v *Value) bool {
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
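// Unlike the CMPW rules, DIVSD has no mirrored variant that folds a load of
// the dividend: division is not commutative, and the x86 DIVSD instruction
// only takes its second (divisor) operand from memory. So the only foldable
// shape is x / load, roughly:
//
//	x / *p  =>  DIVSDload x, (p+off) // divisor read straight from memory
//
// and a load feeding the left operand is not folded.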
func rewriteValueAMD64_OpAMD64DIVSDload_0(v *Value) bool {
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSS_0(v *Value) bool {
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSSload_0(v *Value) bool {
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULL_0(v *Value) bool {
	// match: (HMULL x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULL y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULL)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULLU_0(v *Value) bool {
	// match: (HMULLU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULLU y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULLU)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
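// The HMUL* rules are pure operand-order canonicalization: high-multiply is
// commutative, so the rewrite moves the rematerializeable operand (one the
// register allocator can cheaply recompute, such as a constant) into the
// first slot. In rule form the whole transformation is just:
//
//	(HMULL x y) && !x.rematerializeable() && y.rematerializeable() => (HMULL y x)
//
// The two-sided guard keeps the rewrite from flip-flopping: after the swap
// the first operand is rematerializeable, the condition no longer holds, and
// rewriting stops.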
func rewriteValueAMD64_OpAMD64HMULQ_0(v *Value) bool {
	// match: (HMULQ x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQ y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQ)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULQU_0(v *Value) bool {
	// match: (HMULQU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQU y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQU)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAL [c] {s} (ADDL x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
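// The LEA rules that follow all exploit the x86 addressing form
// base + scale*index + displacement, with scale in {1, 2, 4, 8}. The suffix
// of LEAL1..LEAL8 (and LEAQ1..LEAQ8 below) names the scale, so an address
// computation like
//
//	p + 4*i + 16
//
// can collapse into a single (LEAL4 [16] p i). The x.Op != OpSB guards keep
// the pseudo-register SB (the static base) out of slots where it cannot be
// encoded.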
func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool {
	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} y (ADDLconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
	// cond:
	// result: (LEAL2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [1] y) x)
	// cond:
	// result: (LEAL2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
	// cond:
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [2] y) x)
	// cond:
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
	// cond:
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [3] y) x)
	// cond:
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
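// LEAL1 is symmetric in its two addends, which is why every SHLLconst pattern
// above appears twice, once per operand order. The shift count maps directly
// to the hardware scale field:
//
//	x + (y << 1) == x + 2*y  =>  LEAL2
//	x + (y << 2) == x + 4*y  =>  LEAL4
//	x + (y << 3) == x + 8*y  =>  LEAL8
//
// Higher-scale LEAs are not symmetric (base and index play different roles),
// so their rules below only match the shift in the second operand.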
func rewriteValueAMD64_OpAMD64LEAL2_0(v *Value) bool {
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
	// cond:
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// cond:
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL4_0(v *Value) bool {
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// cond:
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
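// When a constant is folded out of the index operand of a scaled LEA, the
// displacement grows by scale*d, not d, because the index is multiplied by
// the scale. Concretely, for LEAL4:
//
//	x + 4*(y+d) + c == x + 4*y + (c + 4*d)
//
// which is why the conditions check is32Bit(c+4*d) rather than is32Bit(c+d).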
func rewriteValueAMD64_OpAMD64LEAL8_0(v *Value) bool {
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
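// A LEAQ feeding another LEAQ collapses into one instruction as long as the
// combined displacement still fits in 32 bits and at most one of the two
// carries a symbol (which is what canMergeSym checks). For example:
//
//	(LEAQ [8] (LEAQ4 [16] {sym} x y))  =>  (LEAQ4 [24] {sym} x y)
//
// The scale of the inner LEA is preserved; only the offsets and symbols fuse.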
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
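// Folding an inner (LEAQ [off2] {sym2} x) into LEAQ1 moves x into the base
// slot of a base+index form, so on top of the offset and symbol checks the
// condition again excludes SB. The likely-relevant encoding constraint is
// that SB addressing on amd64 is RIP-relative, and RIP-relative addressing
// has no index register, so something like
//
//	(LEAQ1 [8] (LEAQ [4] {sym} SB) y) // sym+12 plus an index: not encodable
//
// must stay as two instructions.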
func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
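// The MOVBQSX rules that follow replace "load, then sign-extend byte" with a
// single byte-extending load. Matching MOVWload/MOVLload/MOVQload as well as
// MOVBload is sound because only the low byte of the loaded value feeds the
// extension, and on little-endian x86 a narrower load from the same address
// reads exactly that byte. Roughly:
//
//	int64(int8(*(*uint32)(p))) == int64(*(*int8)(p)) // little-endian
//
// The x.Uses == 1 guard keeps the wide load alive if anything else needs it.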
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
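// The ANDLconst case drops the sign extension entirely when the mask already
// clears bit 7: a byte whose sign bit is zero is unchanged by sign extension,
// so masking with c&0x7f gives the same result. For instance:
//
//	(MOVBQSX (ANDLconst [0x70] x))  =>  (ANDLconst [0x70] x) // 0x70&0x7f == 0x70
//
// The guard c&0x80 == 0 is what makes this safe.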
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
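// The first rule above is store-to-load forwarding: a byte load whose memory
// argument is a store to the same address can take the stored value directly
// and just re-extend it, skipping the round trip through memory. In Go terms,
// a sequence like
//
//	*p = byte(x)
//	y := int64(int8(*p))
//
// becomes y := int64(int8(x)) once the load is rewritten to (MOVBQSX x).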
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x)
	// cond: zeroUpper56Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper56Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
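// Zero extension is cheaper to discharge than sign extension: if the producer
// of x is already known to leave the upper 56 bits zero (zeroUpper56Bits),
// the MOVBQZX is a no-op and is replaced by x itself. This leans on the
// x86-64 convention that 32-bit operations clear the upper half of a
// register; after a plain byte load the value already has the shape
//
//	0x00000000000000NN
//
// so re-zero-extending it changes nothing.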
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int64(read8(sym, off))])
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpSB {
			break
		}
		if !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(read8(sym, off))
		return true
	}
	return false
}
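// The final MOVBload rule folds loads from read-only data at compile time:
// if the symbol is immutable (symIsRO), the byte at sym+off is already known,
// so the load becomes a MOVLconst. This is what turns indexing a constant
// string into a constant byte, e.g.
//
//	"abc"[1]  =>  (MOVLconst [0x62]) // 'b', read straight from the object file
//
// The memory argument is ignored ("_") because read-only data never changes.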
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVBload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [i] {s} (MOVQconst [c]) p mem)
	// cond: is32Bit(i+c)
	// result: (MOVBload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		p := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
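// With a scale of 1 the pointer and index operands are interchangeable, so
// each MOVBloadidx1 pattern appears in both operand orders, and a constant in
// either slot is folded into the displacement. When the whole index is a
// known constant, the indexed form degrades back to a plain MOVBload:
//
//	p + 1*c + i  =>  (MOVBload [i+c] {s} p mem) // one operand fewer to schedule
//
// subject to the usual is32Bit overflow check on the combined offset.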
func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
} 12667 v.reset(OpAMD64SETLEstore) 12668 v.AuxInt = off 12669 v.Aux = sym 12670 v.AddArg(ptr) 12671 v.AddArg(x) 12672 v.AddArg(mem) 12673 return true 12674 } 12675 // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) 12676 // cond: y.Uses == 1 12677 // result: (SETGstore [off] {sym} ptr x mem) 12678 for { 12679 off := v.AuxInt 12680 sym := v.Aux 12681 _ = v.Args[2] 12682 ptr := v.Args[0] 12683 y := v.Args[1] 12684 if y.Op != OpAMD64SETG { 12685 break 12686 } 12687 x := y.Args[0] 12688 mem := v.Args[2] 12689 if !(y.Uses == 1) { 12690 break 12691 } 12692 v.reset(OpAMD64SETGstore) 12693 v.AuxInt = off 12694 v.Aux = sym 12695 v.AddArg(ptr) 12696 v.AddArg(x) 12697 v.AddArg(mem) 12698 return true 12699 } 12700 // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem) 12701 // cond: y.Uses == 1 12702 // result: (SETGEstore [off] {sym} ptr x mem) 12703 for { 12704 off := v.AuxInt 12705 sym := v.Aux 12706 _ = v.Args[2] 12707 ptr := v.Args[0] 12708 y := v.Args[1] 12709 if y.Op != OpAMD64SETGE { 12710 break 12711 } 12712 x := y.Args[0] 12713 mem := v.Args[2] 12714 if !(y.Uses == 1) { 12715 break 12716 } 12717 v.reset(OpAMD64SETGEstore) 12718 v.AuxInt = off 12719 v.Aux = sym 12720 v.AddArg(ptr) 12721 v.AddArg(x) 12722 v.AddArg(mem) 12723 return true 12724 } 12725 // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) 12726 // cond: y.Uses == 1 12727 // result: (SETEQstore [off] {sym} ptr x mem) 12728 for { 12729 off := v.AuxInt 12730 sym := v.Aux 12731 _ = v.Args[2] 12732 ptr := v.Args[0] 12733 y := v.Args[1] 12734 if y.Op != OpAMD64SETEQ { 12735 break 12736 } 12737 x := y.Args[0] 12738 mem := v.Args[2] 12739 if !(y.Uses == 1) { 12740 break 12741 } 12742 v.reset(OpAMD64SETEQstore) 12743 v.AuxInt = off 12744 v.Aux = sym 12745 v.AddArg(ptr) 12746 v.AddArg(x) 12747 v.AddArg(mem) 12748 return true 12749 } 12750 // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) 12751 // cond: y.Uses == 1 12752 // result: (SETNEstore [off] {sym} ptr x mem) 12753 for { 12754 off := v.AuxInt 12755 sym := v.Aux 12756 _ = v.Args[2] 12757 ptr := v.Args[0] 12758 y := v.Args[1] 12759 if y.Op != OpAMD64SETNE { 12760 break 12761 } 12762 x := y.Args[0] 12763 mem := v.Args[2] 12764 if !(y.Uses == 1) { 12765 break 12766 } 12767 v.reset(OpAMD64SETNEstore) 12768 v.AuxInt = off 12769 v.Aux = sym 12770 v.AddArg(ptr) 12771 v.AddArg(x) 12772 v.AddArg(mem) 12773 return true 12774 } 12775 // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) 12776 // cond: y.Uses == 1 12777 // result: (SETBstore [off] {sym} ptr x mem) 12778 for { 12779 off := v.AuxInt 12780 sym := v.Aux 12781 _ = v.Args[2] 12782 ptr := v.Args[0] 12783 y := v.Args[1] 12784 if y.Op != OpAMD64SETB { 12785 break 12786 } 12787 x := y.Args[0] 12788 mem := v.Args[2] 12789 if !(y.Uses == 1) { 12790 break 12791 } 12792 v.reset(OpAMD64SETBstore) 12793 v.AuxInt = off 12794 v.Aux = sym 12795 v.AddArg(ptr) 12796 v.AddArg(x) 12797 v.AddArg(mem) 12798 return true 12799 } 12800 // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) 12801 // cond: y.Uses == 1 12802 // result: (SETBEstore [off] {sym} ptr x mem) 12803 for { 12804 off := v.AuxInt 12805 sym := v.Aux 12806 _ = v.Args[2] 12807 ptr := v.Args[0] 12808 y := v.Args[1] 12809 if y.Op != OpAMD64SETBE { 12810 break 12811 } 12812 x := y.Args[0] 12813 mem := v.Args[2] 12814 if !(y.Uses == 1) { 12815 break 12816 } 12817 v.reset(OpAMD64SETBEstore) 12818 v.AuxInt = off 12819 v.Aux = sym 12820 v.AddArg(ptr) 12821 v.AddArg(x) 12822 v.AddArg(mem) 12823 return true 12824 } 12825 // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) 12826 // cond: y.Uses 
== 1 12827 // result: (SETAstore [off] {sym} ptr x mem) 12828 for { 12829 off := v.AuxInt 12830 sym := v.Aux 12831 _ = v.Args[2] 12832 ptr := v.Args[0] 12833 y := v.Args[1] 12834 if y.Op != OpAMD64SETA { 12835 break 12836 } 12837 x := y.Args[0] 12838 mem := v.Args[2] 12839 if !(y.Uses == 1) { 12840 break 12841 } 12842 v.reset(OpAMD64SETAstore) 12843 v.AuxInt = off 12844 v.Aux = sym 12845 v.AddArg(ptr) 12846 v.AddArg(x) 12847 v.AddArg(mem) 12848 return true 12849 } 12850 // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) 12851 // cond: y.Uses == 1 12852 // result: (SETAEstore [off] {sym} ptr x mem) 12853 for { 12854 off := v.AuxInt 12855 sym := v.Aux 12856 _ = v.Args[2] 12857 ptr := v.Args[0] 12858 y := v.Args[1] 12859 if y.Op != OpAMD64SETAE { 12860 break 12861 } 12862 x := y.Args[0] 12863 mem := v.Args[2] 12864 if !(y.Uses == 1) { 12865 break 12866 } 12867 v.reset(OpAMD64SETAEstore) 12868 v.AuxInt = off 12869 v.Aux = sym 12870 v.AddArg(ptr) 12871 v.AddArg(x) 12872 v.AddArg(mem) 12873 return true 12874 } 12875 return false 12876 } 12877 func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { 12878 b := v.Block 12879 _ = b 12880 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 12881 // cond: 12882 // result: (MOVBstore [off] {sym} ptr x mem) 12883 for { 12884 off := v.AuxInt 12885 sym := v.Aux 12886 _ = v.Args[2] 12887 ptr := v.Args[0] 12888 v_1 := v.Args[1] 12889 if v_1.Op != OpAMD64MOVBQSX { 12890 break 12891 } 12892 x := v_1.Args[0] 12893 mem := v.Args[2] 12894 v.reset(OpAMD64MOVBstore) 12895 v.AuxInt = off 12896 v.Aux = sym 12897 v.AddArg(ptr) 12898 v.AddArg(x) 12899 v.AddArg(mem) 12900 return true 12901 } 12902 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 12903 // cond: 12904 // result: (MOVBstore [off] {sym} ptr x mem) 12905 for { 12906 off := v.AuxInt 12907 sym := v.Aux 12908 _ = v.Args[2] 12909 ptr := v.Args[0] 12910 v_1 := v.Args[1] 12911 if v_1.Op != OpAMD64MOVBQZX { 12912 break 12913 } 12914 x := v_1.Args[0] 12915 mem := v.Args[2] 12916 v.reset(OpAMD64MOVBstore) 12917 v.AuxInt = off 12918 v.Aux = sym 12919 v.AddArg(ptr) 12920 v.AddArg(x) 12921 v.AddArg(mem) 12922 return true 12923 } 12924 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 12925 // cond: is32Bit(off1+off2) 12926 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 12927 for { 12928 off1 := v.AuxInt 12929 sym := v.Aux 12930 _ = v.Args[2] 12931 v_0 := v.Args[0] 12932 if v_0.Op != OpAMD64ADDQconst { 12933 break 12934 } 12935 off2 := v_0.AuxInt 12936 ptr := v_0.Args[0] 12937 val := v.Args[1] 12938 mem := v.Args[2] 12939 if !(is32Bit(off1 + off2)) { 12940 break 12941 } 12942 v.reset(OpAMD64MOVBstore) 12943 v.AuxInt = off1 + off2 12944 v.Aux = sym 12945 v.AddArg(ptr) 12946 v.AddArg(val) 12947 v.AddArg(mem) 12948 return true 12949 } 12950 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 12951 // cond: validOff(off) 12952 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 12953 for { 12954 off := v.AuxInt 12955 sym := v.Aux 12956 _ = v.Args[2] 12957 ptr := v.Args[0] 12958 v_1 := v.Args[1] 12959 if v_1.Op != OpAMD64MOVLconst { 12960 break 12961 } 12962 c := v_1.AuxInt 12963 mem := v.Args[2] 12964 if !(validOff(off)) { 12965 break 12966 } 12967 v.reset(OpAMD64MOVBstoreconst) 12968 v.AuxInt = makeValAndOff(int64(int8(c)), off) 12969 v.Aux = sym 12970 v.AddArg(ptr) 12971 v.AddArg(mem) 12972 return true 12973 } 12974 // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) 12975 // cond: validOff(off) 12976 // result: (MOVBstoreconst 
[makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 12977 for { 12978 off := v.AuxInt 12979 sym := v.Aux 12980 _ = v.Args[2] 12981 ptr := v.Args[0] 12982 v_1 := v.Args[1] 12983 if v_1.Op != OpAMD64MOVQconst { 12984 break 12985 } 12986 c := v_1.AuxInt 12987 mem := v.Args[2] 12988 if !(validOff(off)) { 12989 break 12990 } 12991 v.reset(OpAMD64MOVBstoreconst) 12992 v.AuxInt = makeValAndOff(int64(int8(c)), off) 12993 v.Aux = sym 12994 v.AddArg(ptr) 12995 v.AddArg(mem) 12996 return true 12997 } 12998 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 12999 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13000 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13001 for { 13002 off1 := v.AuxInt 13003 sym1 := v.Aux 13004 _ = v.Args[2] 13005 v_0 := v.Args[0] 13006 if v_0.Op != OpAMD64LEAQ { 13007 break 13008 } 13009 off2 := v_0.AuxInt 13010 sym2 := v_0.Aux 13011 base := v_0.Args[0] 13012 val := v.Args[1] 13013 mem := v.Args[2] 13014 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13015 break 13016 } 13017 v.reset(OpAMD64MOVBstore) 13018 v.AuxInt = off1 + off2 13019 v.Aux = mergeSym(sym1, sym2) 13020 v.AddArg(base) 13021 v.AddArg(val) 13022 v.AddArg(mem) 13023 return true 13024 } 13025 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 13026 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13027 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 13028 for { 13029 off1 := v.AuxInt 13030 sym1 := v.Aux 13031 _ = v.Args[2] 13032 v_0 := v.Args[0] 13033 if v_0.Op != OpAMD64LEAQ1 { 13034 break 13035 } 13036 off2 := v_0.AuxInt 13037 sym2 := v_0.Aux 13038 _ = v_0.Args[1] 13039 ptr := v_0.Args[0] 13040 idx := v_0.Args[1] 13041 val := v.Args[1] 13042 mem := v.Args[2] 13043 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13044 break 13045 } 13046 v.reset(OpAMD64MOVBstoreidx1) 13047 v.AuxInt = off1 + off2 13048 v.Aux = mergeSym(sym1, sym2) 13049 v.AddArg(ptr) 13050 v.AddArg(idx) 13051 v.AddArg(val) 13052 v.AddArg(mem) 13053 return true 13054 } 13055 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 13056 // cond: ptr.Op != OpSB 13057 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 13058 for { 13059 off := v.AuxInt 13060 sym := v.Aux 13061 _ = v.Args[2] 13062 v_0 := v.Args[0] 13063 if v_0.Op != OpAMD64ADDQ { 13064 break 13065 } 13066 _ = v_0.Args[1] 13067 ptr := v_0.Args[0] 13068 idx := v_0.Args[1] 13069 val := v.Args[1] 13070 mem := v.Args[2] 13071 if !(ptr.Op != OpSB) { 13072 break 13073 } 13074 v.reset(OpAMD64MOVBstoreidx1) 13075 v.AuxInt = off 13076 v.Aux = sym 13077 v.AddArg(ptr) 13078 v.AddArg(idx) 13079 v.AddArg(val) 13080 v.AddArg(mem) 13081 return true 13082 } 13083 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 13084 // cond: x0.Uses == 1 && clobber(x0) 13085 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 13086 for { 13087 i := v.AuxInt 13088 s := v.Aux 13089 _ = v.Args[2] 13090 p := v.Args[0] 13091 w := v.Args[1] 13092 x0 := v.Args[2] 13093 if x0.Op != OpAMD64MOVBstore { 13094 break 13095 } 13096 if x0.AuxInt != i-1 { 13097 break 13098 } 13099 if x0.Aux != s { 13100 break 13101 } 13102 _ = x0.Args[2] 13103 if p != x0.Args[0] { 13104 break 13105 } 13106 x0_1 := x0.Args[1] 13107 if x0_1.Op != OpAMD64SHRWconst { 13108 break 13109 } 13110 if x0_1.AuxInt != 8 { 13111 break 13112 } 13113 if w != x0_1.Args[0] { 13114 break 13115 } 13116 mem := x0.Args[2] 13117 if !(x0.Uses == 1 && clobber(x0)) { 13118 break 13119 } 13120 
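// At this point both single-byte stores of w have matched: w>>8 went to i-1 and
// the low byte of w to i, i.e. a big-endian 16-bit store of w. On a little-endian
// target that is one MOVWstore of the byte-swapped value, and ROLW $8 is exactly
// a 16-bit byte swap. For example, with w = 0xAABB the two byte stores put 0xAA
// at i-1 and 0xBB at i; storing ROLW $8 (w) = 0xBBAA at i-1 writes the same two bytes.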
v.reset(OpAMD64MOVWstore) 13121 v.AuxInt = i - 1 13122 v.Aux = s 13123 v.AddArg(p) 13124 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 13125 v0.AuxInt = 8 13126 v0.AddArg(w) 13127 v.AddArg(v0) 13128 v.AddArg(mem) 13129 return true 13130 } 13131 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 13132 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 13133 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 13134 for { 13135 i := v.AuxInt 13136 s := v.Aux 13137 _ = v.Args[2] 13138 p := v.Args[0] 13139 w := v.Args[1] 13140 x2 := v.Args[2] 13141 if x2.Op != OpAMD64MOVBstore { 13142 break 13143 } 13144 if x2.AuxInt != i-1 { 13145 break 13146 } 13147 if x2.Aux != s { 13148 break 13149 } 13150 _ = x2.Args[2] 13151 if p != x2.Args[0] { 13152 break 13153 } 13154 x2_1 := x2.Args[1] 13155 if x2_1.Op != OpAMD64SHRLconst { 13156 break 13157 } 13158 if x2_1.AuxInt != 8 { 13159 break 13160 } 13161 if w != x2_1.Args[0] { 13162 break 13163 } 13164 x1 := x2.Args[2] 13165 if x1.Op != OpAMD64MOVBstore { 13166 break 13167 } 13168 if x1.AuxInt != i-2 { 13169 break 13170 } 13171 if x1.Aux != s { 13172 break 13173 } 13174 _ = x1.Args[2] 13175 if p != x1.Args[0] { 13176 break 13177 } 13178 x1_1 := x1.Args[1] 13179 if x1_1.Op != OpAMD64SHRLconst { 13180 break 13181 } 13182 if x1_1.AuxInt != 16 { 13183 break 13184 } 13185 if w != x1_1.Args[0] { 13186 break 13187 } 13188 x0 := x1.Args[2] 13189 if x0.Op != OpAMD64MOVBstore { 13190 break 13191 } 13192 if x0.AuxInt != i-3 { 13193 break 13194 } 13195 if x0.Aux != s { 13196 break 13197 } 13198 _ = x0.Args[2] 13199 if p != x0.Args[0] { 13200 break 13201 } 13202 x0_1 := x0.Args[1] 13203 if x0_1.Op != OpAMD64SHRLconst { 13204 break 13205 } 13206 if x0_1.AuxInt != 24 { 13207 break 13208 } 13209 if w != x0_1.Args[0] { 13210 break 13211 } 13212 mem := x0.Args[2] 13213 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 13214 break 13215 } 13216 v.reset(OpAMD64MOVLstore) 13217 v.AuxInt = i - 3 13218 v.Aux = s 13219 v.AddArg(p) 13220 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 13221 v0.AddArg(w) 13222 v.AddArg(v0) 13223 v.AddArg(mem) 13224 return true 13225 } 13226 return false 13227 } 13228 func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { 13229 b := v.Block 13230 _ = b 13231 typ := &b.Func.Config.Types 13232 _ = typ 13233 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 13234 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 13235 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 13236 for { 13237 i := v.AuxInt 13238 s := v.Aux 13239 _ = v.Args[2] 13240 p := v.Args[0] 13241 w := v.Args[1] 13242 x6 := v.Args[2] 13243 if x6.Op != OpAMD64MOVBstore { 13244 break 13245 } 13246 if x6.AuxInt != i-1 { 13247 break 13248 } 13249 if x6.Aux != s { 13250 break 13251 } 13252 _ = x6.Args[2] 13253 if p != x6.Args[0] { 13254 break 13255 } 13256 x6_1 := x6.Args[1] 13257 
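// The matcher now walks the memory chain x6 -> x5 -> ... -> x0, requiring at each
// step a MOVBstore of the next-higher byte (SHRQconst 8, 16, ..., 56 of the same w)
// at the next-lower offset. Together with the single-use and clobber conditions
// checked at the end, the whole eight-store chain collapses into one MOVQstore of
// (BSWAPQ w) at offset i-7.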
if x6_1.Op != OpAMD64SHRQconst { 13258 break 13259 } 13260 if x6_1.AuxInt != 8 { 13261 break 13262 } 13263 if w != x6_1.Args[0] { 13264 break 13265 } 13266 x5 := x6.Args[2] 13267 if x5.Op != OpAMD64MOVBstore { 13268 break 13269 } 13270 if x5.AuxInt != i-2 { 13271 break 13272 } 13273 if x5.Aux != s { 13274 break 13275 } 13276 _ = x5.Args[2] 13277 if p != x5.Args[0] { 13278 break 13279 } 13280 x5_1 := x5.Args[1] 13281 if x5_1.Op != OpAMD64SHRQconst { 13282 break 13283 } 13284 if x5_1.AuxInt != 16 { 13285 break 13286 } 13287 if w != x5_1.Args[0] { 13288 break 13289 } 13290 x4 := x5.Args[2] 13291 if x4.Op != OpAMD64MOVBstore { 13292 break 13293 } 13294 if x4.AuxInt != i-3 { 13295 break 13296 } 13297 if x4.Aux != s { 13298 break 13299 } 13300 _ = x4.Args[2] 13301 if p != x4.Args[0] { 13302 break 13303 } 13304 x4_1 := x4.Args[1] 13305 if x4_1.Op != OpAMD64SHRQconst { 13306 break 13307 } 13308 if x4_1.AuxInt != 24 { 13309 break 13310 } 13311 if w != x4_1.Args[0] { 13312 break 13313 } 13314 x3 := x4.Args[2] 13315 if x3.Op != OpAMD64MOVBstore { 13316 break 13317 } 13318 if x3.AuxInt != i-4 { 13319 break 13320 } 13321 if x3.Aux != s { 13322 break 13323 } 13324 _ = x3.Args[2] 13325 if p != x3.Args[0] { 13326 break 13327 } 13328 x3_1 := x3.Args[1] 13329 if x3_1.Op != OpAMD64SHRQconst { 13330 break 13331 } 13332 if x3_1.AuxInt != 32 { 13333 break 13334 } 13335 if w != x3_1.Args[0] { 13336 break 13337 } 13338 x2 := x3.Args[2] 13339 if x2.Op != OpAMD64MOVBstore { 13340 break 13341 } 13342 if x2.AuxInt != i-5 { 13343 break 13344 } 13345 if x2.Aux != s { 13346 break 13347 } 13348 _ = x2.Args[2] 13349 if p != x2.Args[0] { 13350 break 13351 } 13352 x2_1 := x2.Args[1] 13353 if x2_1.Op != OpAMD64SHRQconst { 13354 break 13355 } 13356 if x2_1.AuxInt != 40 { 13357 break 13358 } 13359 if w != x2_1.Args[0] { 13360 break 13361 } 13362 x1 := x2.Args[2] 13363 if x1.Op != OpAMD64MOVBstore { 13364 break 13365 } 13366 if x1.AuxInt != i-6 { 13367 break 13368 } 13369 if x1.Aux != s { 13370 break 13371 } 13372 _ = x1.Args[2] 13373 if p != x1.Args[0] { 13374 break 13375 } 13376 x1_1 := x1.Args[1] 13377 if x1_1.Op != OpAMD64SHRQconst { 13378 break 13379 } 13380 if x1_1.AuxInt != 48 { 13381 break 13382 } 13383 if w != x1_1.Args[0] { 13384 break 13385 } 13386 x0 := x1.Args[2] 13387 if x0.Op != OpAMD64MOVBstore { 13388 break 13389 } 13390 if x0.AuxInt != i-7 { 13391 break 13392 } 13393 if x0.Aux != s { 13394 break 13395 } 13396 _ = x0.Args[2] 13397 if p != x0.Args[0] { 13398 break 13399 } 13400 x0_1 := x0.Args[1] 13401 if x0_1.Op != OpAMD64SHRQconst { 13402 break 13403 } 13404 if x0_1.AuxInt != 56 { 13405 break 13406 } 13407 if w != x0_1.Args[0] { 13408 break 13409 } 13410 mem := x0.Args[2] 13411 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 13412 break 13413 } 13414 v.reset(OpAMD64MOVQstore) 13415 v.AuxInt = i - 7 13416 v.Aux = s 13417 v.AddArg(p) 13418 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 13419 v0.AddArg(w) 13420 v.AddArg(v0) 13421 v.AddArg(mem) 13422 return true 13423 } 13424 // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 13425 // cond: x.Uses == 1 && clobber(x) 13426 // result: (MOVWstore [i-1] {s} p w mem) 13427 for { 13428 i := v.AuxInt 13429 s := v.Aux 13430 _ = v.Args[2] 13431 p := v.Args[0] 13432 v_1 := v.Args[1] 13433 if v_1.Op != OpAMD64SHRWconst { 13434 break 13435 } 13436 if v_1.AuxInt != 8 { 13437 
break 13438 } 13439 w := v_1.Args[0] 13440 x := v.Args[2] 13441 if x.Op != OpAMD64MOVBstore { 13442 break 13443 } 13444 if x.AuxInt != i-1 { 13445 break 13446 } 13447 if x.Aux != s { 13448 break 13449 } 13450 _ = x.Args[2] 13451 if p != x.Args[0] { 13452 break 13453 } 13454 if w != x.Args[1] { 13455 break 13456 } 13457 mem := x.Args[2] 13458 if !(x.Uses == 1 && clobber(x)) { 13459 break 13460 } 13461 v.reset(OpAMD64MOVWstore) 13462 v.AuxInt = i - 1 13463 v.Aux = s 13464 v.AddArg(p) 13465 v.AddArg(w) 13466 v.AddArg(mem) 13467 return true 13468 } 13469 // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 13470 // cond: x.Uses == 1 && clobber(x) 13471 // result: (MOVWstore [i-1] {s} p w mem) 13472 for { 13473 i := v.AuxInt 13474 s := v.Aux 13475 _ = v.Args[2] 13476 p := v.Args[0] 13477 v_1 := v.Args[1] 13478 if v_1.Op != OpAMD64SHRLconst { 13479 break 13480 } 13481 if v_1.AuxInt != 8 { 13482 break 13483 } 13484 w := v_1.Args[0] 13485 x := v.Args[2] 13486 if x.Op != OpAMD64MOVBstore { 13487 break 13488 } 13489 if x.AuxInt != i-1 { 13490 break 13491 } 13492 if x.Aux != s { 13493 break 13494 } 13495 _ = x.Args[2] 13496 if p != x.Args[0] { 13497 break 13498 } 13499 if w != x.Args[1] { 13500 break 13501 } 13502 mem := x.Args[2] 13503 if !(x.Uses == 1 && clobber(x)) { 13504 break 13505 } 13506 v.reset(OpAMD64MOVWstore) 13507 v.AuxInt = i - 1 13508 v.Aux = s 13509 v.AddArg(p) 13510 v.AddArg(w) 13511 v.AddArg(mem) 13512 return true 13513 } 13514 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 13515 // cond: x.Uses == 1 && clobber(x) 13516 // result: (MOVWstore [i-1] {s} p w mem) 13517 for { 13518 i := v.AuxInt 13519 s := v.Aux 13520 _ = v.Args[2] 13521 p := v.Args[0] 13522 v_1 := v.Args[1] 13523 if v_1.Op != OpAMD64SHRQconst { 13524 break 13525 } 13526 if v_1.AuxInt != 8 { 13527 break 13528 } 13529 w := v_1.Args[0] 13530 x := v.Args[2] 13531 if x.Op != OpAMD64MOVBstore { 13532 break 13533 } 13534 if x.AuxInt != i-1 { 13535 break 13536 } 13537 if x.Aux != s { 13538 break 13539 } 13540 _ = x.Args[2] 13541 if p != x.Args[0] { 13542 break 13543 } 13544 if w != x.Args[1] { 13545 break 13546 } 13547 mem := x.Args[2] 13548 if !(x.Uses == 1 && clobber(x)) { 13549 break 13550 } 13551 v.reset(OpAMD64MOVWstore) 13552 v.AuxInt = i - 1 13553 v.Aux = s 13554 v.AddArg(p) 13555 v.AddArg(w) 13556 v.AddArg(mem) 13557 return true 13558 } 13559 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem)) 13560 // cond: x.Uses == 1 && clobber(x) 13561 // result: (MOVWstore [i] {s} p w mem) 13562 for { 13563 i := v.AuxInt 13564 s := v.Aux 13565 _ = v.Args[2] 13566 p := v.Args[0] 13567 w := v.Args[1] 13568 x := v.Args[2] 13569 if x.Op != OpAMD64MOVBstore { 13570 break 13571 } 13572 if x.AuxInt != i+1 { 13573 break 13574 } 13575 if x.Aux != s { 13576 break 13577 } 13578 _ = x.Args[2] 13579 if p != x.Args[0] { 13580 break 13581 } 13582 x_1 := x.Args[1] 13583 if x_1.Op != OpAMD64SHRWconst { 13584 break 13585 } 13586 if x_1.AuxInt != 8 { 13587 break 13588 } 13589 if w != x_1.Args[0] { 13590 break 13591 } 13592 mem := x.Args[2] 13593 if !(x.Uses == 1 && clobber(x)) { 13594 break 13595 } 13596 v.reset(OpAMD64MOVWstore) 13597 v.AuxInt = i 13598 v.Aux = s 13599 v.AddArg(p) 13600 v.AddArg(w) 13601 v.AddArg(mem) 13602 return true 13603 } 13604 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem)) 13605 // cond: x.Uses == 1 && clobber(x) 13606 // result: (MOVWstore [i] {s} p w mem) 13607 for { 13608 i := v.AuxInt 13609 s := 
v.Aux 13610 _ = v.Args[2] 13611 p := v.Args[0] 13612 w := v.Args[1] 13613 x := v.Args[2] 13614 if x.Op != OpAMD64MOVBstore { 13615 break 13616 } 13617 if x.AuxInt != i+1 { 13618 break 13619 } 13620 if x.Aux != s { 13621 break 13622 } 13623 _ = x.Args[2] 13624 if p != x.Args[0] { 13625 break 13626 } 13627 x_1 := x.Args[1] 13628 if x_1.Op != OpAMD64SHRLconst { 13629 break 13630 } 13631 if x_1.AuxInt != 8 { 13632 break 13633 } 13634 if w != x_1.Args[0] { 13635 break 13636 } 13637 mem := x.Args[2] 13638 if !(x.Uses == 1 && clobber(x)) { 13639 break 13640 } 13641 v.reset(OpAMD64MOVWstore) 13642 v.AuxInt = i 13643 v.Aux = s 13644 v.AddArg(p) 13645 v.AddArg(w) 13646 v.AddArg(mem) 13647 return true 13648 } 13649 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem)) 13650 // cond: x.Uses == 1 && clobber(x) 13651 // result: (MOVWstore [i] {s} p w mem) 13652 for { 13653 i := v.AuxInt 13654 s := v.Aux 13655 _ = v.Args[2] 13656 p := v.Args[0] 13657 w := v.Args[1] 13658 x := v.Args[2] 13659 if x.Op != OpAMD64MOVBstore { 13660 break 13661 } 13662 if x.AuxInt != i+1 { 13663 break 13664 } 13665 if x.Aux != s { 13666 break 13667 } 13668 _ = x.Args[2] 13669 if p != x.Args[0] { 13670 break 13671 } 13672 x_1 := x.Args[1] 13673 if x_1.Op != OpAMD64SHRQconst { 13674 break 13675 } 13676 if x_1.AuxInt != 8 { 13677 break 13678 } 13679 if w != x_1.Args[0] { 13680 break 13681 } 13682 mem := x.Args[2] 13683 if !(x.Uses == 1 && clobber(x)) { 13684 break 13685 } 13686 v.reset(OpAMD64MOVWstore) 13687 v.AuxInt = i 13688 v.Aux = s 13689 v.AddArg(p) 13690 v.AddArg(w) 13691 v.AddArg(mem) 13692 return true 13693 } 13694 // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) 13695 // cond: x.Uses == 1 && clobber(x) 13696 // result: (MOVWstore [i-1] {s} p w0 mem) 13697 for { 13698 i := v.AuxInt 13699 s := v.Aux 13700 _ = v.Args[2] 13701 p := v.Args[0] 13702 v_1 := v.Args[1] 13703 if v_1.Op != OpAMD64SHRLconst { 13704 break 13705 } 13706 j := v_1.AuxInt 13707 w := v_1.Args[0] 13708 x := v.Args[2] 13709 if x.Op != OpAMD64MOVBstore { 13710 break 13711 } 13712 if x.AuxInt != i-1 { 13713 break 13714 } 13715 if x.Aux != s { 13716 break 13717 } 13718 _ = x.Args[2] 13719 if p != x.Args[0] { 13720 break 13721 } 13722 w0 := x.Args[1] 13723 if w0.Op != OpAMD64SHRLconst { 13724 break 13725 } 13726 if w0.AuxInt != j-8 { 13727 break 13728 } 13729 if w != w0.Args[0] { 13730 break 13731 } 13732 mem := x.Args[2] 13733 if !(x.Uses == 1 && clobber(x)) { 13734 break 13735 } 13736 v.reset(OpAMD64MOVWstore) 13737 v.AuxInt = i - 1 13738 v.Aux = s 13739 v.AddArg(p) 13740 v.AddArg(w0) 13741 v.AddArg(mem) 13742 return true 13743 } 13744 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 13745 // cond: x.Uses == 1 && clobber(x) 13746 // result: (MOVWstore [i-1] {s} p w0 mem) 13747 for { 13748 i := v.AuxInt 13749 s := v.Aux 13750 _ = v.Args[2] 13751 p := v.Args[0] 13752 v_1 := v.Args[1] 13753 if v_1.Op != OpAMD64SHRQconst { 13754 break 13755 } 13756 j := v_1.AuxInt 13757 w := v_1.Args[0] 13758 x := v.Args[2] 13759 if x.Op != OpAMD64MOVBstore { 13760 break 13761 } 13762 if x.AuxInt != i-1 { 13763 break 13764 } 13765 if x.Aux != s { 13766 break 13767 } 13768 _ = x.Args[2] 13769 if p != x.Args[0] { 13770 break 13771 } 13772 w0 := x.Args[1] 13773 if w0.Op != OpAMD64SHRQconst { 13774 break 13775 } 13776 if w0.AuxInt != j-8 { 13777 break 13778 } 13779 if w != w0.Args[0] { 13780 break 13781 } 13782 mem := x.Args[2] 13783 if !(x.Uses == 
1 && clobber(x)) { 13784 break 13785 } 13786 v.reset(OpAMD64MOVWstore) 13787 v.AuxInt = i - 1 13788 v.Aux = s 13789 v.AddArg(p) 13790 v.AddArg(w0) 13791 v.AddArg(mem) 13792 return true 13793 } 13794 // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) 13795 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 13796 // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) 13797 for { 13798 i := v.AuxInt 13799 s := v.Aux 13800 _ = v.Args[2] 13801 p := v.Args[0] 13802 x1 := v.Args[1] 13803 if x1.Op != OpAMD64MOVBload { 13804 break 13805 } 13806 j := x1.AuxInt 13807 s2 := x1.Aux 13808 _ = x1.Args[1] 13809 p2 := x1.Args[0] 13810 mem := x1.Args[1] 13811 mem2 := v.Args[2] 13812 if mem2.Op != OpAMD64MOVBstore { 13813 break 13814 } 13815 if mem2.AuxInt != i-1 { 13816 break 13817 } 13818 if mem2.Aux != s { 13819 break 13820 } 13821 _ = mem2.Args[2] 13822 if p != mem2.Args[0] { 13823 break 13824 } 13825 x2 := mem2.Args[1] 13826 if x2.Op != OpAMD64MOVBload { 13827 break 13828 } 13829 if x2.AuxInt != j-1 { 13830 break 13831 } 13832 if x2.Aux != s2 { 13833 break 13834 } 13835 _ = x2.Args[1] 13836 if p2 != x2.Args[0] { 13837 break 13838 } 13839 if mem != x2.Args[1] { 13840 break 13841 } 13842 if mem != mem2.Args[2] { 13843 break 13844 } 13845 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 13846 break 13847 } 13848 v.reset(OpAMD64MOVWstore) 13849 v.AuxInt = i - 1 13850 v.Aux = s 13851 v.AddArg(p) 13852 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 13853 v0.AuxInt = j - 1 13854 v0.Aux = s2 13855 v0.AddArg(p2) 13856 v0.AddArg(mem) 13857 v.AddArg(v0) 13858 v.AddArg(mem) 13859 return true 13860 } 13861 return false 13862 } 13863 func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { 13864 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 13865 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 13866 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13867 for { 13868 off1 := v.AuxInt 13869 sym1 := v.Aux 13870 _ = v.Args[2] 13871 v_0 := v.Args[0] 13872 if v_0.Op != OpAMD64LEAL { 13873 break 13874 } 13875 off2 := v_0.AuxInt 13876 sym2 := v_0.Aux 13877 base := v_0.Args[0] 13878 val := v.Args[1] 13879 mem := v.Args[2] 13880 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 13881 break 13882 } 13883 v.reset(OpAMD64MOVBstore) 13884 v.AuxInt = off1 + off2 13885 v.Aux = mergeSym(sym1, sym2) 13886 v.AddArg(base) 13887 v.AddArg(val) 13888 v.AddArg(mem) 13889 return true 13890 } 13891 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 13892 // cond: is32Bit(off1+off2) 13893 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 13894 for { 13895 off1 := v.AuxInt 13896 sym := v.Aux 13897 _ = v.Args[2] 13898 v_0 := v.Args[0] 13899 if v_0.Op != OpAMD64ADDLconst { 13900 break 13901 } 13902 off2 := v_0.AuxInt 13903 ptr := v_0.Args[0] 13904 val := v.Args[1] 13905 mem := v.Args[2] 13906 if !(is32Bit(off1 + off2)) { 13907 break 13908 } 13909 v.reset(OpAMD64MOVBstore) 13910 v.AuxInt = off1 + off2 13911 v.Aux = sym 13912 v.AddArg(ptr) 13913 v.AddArg(val) 13914 v.AddArg(mem) 13915 return true 13916 } 13917 return false 13918 } 13919 func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { 13920 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 13921 // cond: ValAndOff(sc).canAdd(off) 13922 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 
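// MOVBstoreconst carries a ValAndOff in AuxInt: a (value, offset) pair packed into
// a single int64 (roughly, the value in the high 32 bits and the offset in the low
// 32). Folding an ADDQconst therefore goes through ValAndOff(sc).canAdd(off), which
// checks that the combined offset still fits in its 32-bit half, rather than the
// plain is32Bit test used by the non-constant store rules above.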
13923 for { 13924 sc := v.AuxInt 13925 s := v.Aux 13926 _ = v.Args[1] 13927 v_0 := v.Args[0] 13928 if v_0.Op != OpAMD64ADDQconst { 13929 break 13930 } 13931 off := v_0.AuxInt 13932 ptr := v_0.Args[0] 13933 mem := v.Args[1] 13934 if !(ValAndOff(sc).canAdd(off)) { 13935 break 13936 } 13937 v.reset(OpAMD64MOVBstoreconst) 13938 v.AuxInt = ValAndOff(sc).add(off) 13939 v.Aux = s 13940 v.AddArg(ptr) 13941 v.AddArg(mem) 13942 return true 13943 } 13944 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 13945 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13946 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13947 for { 13948 sc := v.AuxInt 13949 sym1 := v.Aux 13950 _ = v.Args[1] 13951 v_0 := v.Args[0] 13952 if v_0.Op != OpAMD64LEAQ { 13953 break 13954 } 13955 off := v_0.AuxInt 13956 sym2 := v_0.Aux 13957 ptr := v_0.Args[0] 13958 mem := v.Args[1] 13959 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13960 break 13961 } 13962 v.reset(OpAMD64MOVBstoreconst) 13963 v.AuxInt = ValAndOff(sc).add(off) 13964 v.Aux = mergeSym(sym1, sym2) 13965 v.AddArg(ptr) 13966 v.AddArg(mem) 13967 return true 13968 } 13969 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 13970 // cond: canMergeSym(sym1, sym2) 13971 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13972 for { 13973 x := v.AuxInt 13974 sym1 := v.Aux 13975 _ = v.Args[1] 13976 v_0 := v.Args[0] 13977 if v_0.Op != OpAMD64LEAQ1 { 13978 break 13979 } 13980 off := v_0.AuxInt 13981 sym2 := v_0.Aux 13982 _ = v_0.Args[1] 13983 ptr := v_0.Args[0] 13984 idx := v_0.Args[1] 13985 mem := v.Args[1] 13986 if !(canMergeSym(sym1, sym2)) { 13987 break 13988 } 13989 v.reset(OpAMD64MOVBstoreconstidx1) 13990 v.AuxInt = ValAndOff(x).add(off) 13991 v.Aux = mergeSym(sym1, sym2) 13992 v.AddArg(ptr) 13993 v.AddArg(idx) 13994 v.AddArg(mem) 13995 return true 13996 } 13997 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 13998 // cond: 13999 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 14000 for { 14001 x := v.AuxInt 14002 sym := v.Aux 14003 _ = v.Args[1] 14004 v_0 := v.Args[0] 14005 if v_0.Op != OpAMD64ADDQ { 14006 break 14007 } 14008 _ = v_0.Args[1] 14009 ptr := v_0.Args[0] 14010 idx := v_0.Args[1] 14011 mem := v.Args[1] 14012 v.reset(OpAMD64MOVBstoreconstidx1) 14013 v.AuxInt = x 14014 v.Aux = sym 14015 v.AddArg(ptr) 14016 v.AddArg(idx) 14017 v.AddArg(mem) 14018 return true 14019 } 14020 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 14021 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 14022 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 14023 for { 14024 c := v.AuxInt 14025 s := v.Aux 14026 _ = v.Args[1] 14027 p := v.Args[0] 14028 x := v.Args[1] 14029 if x.Op != OpAMD64MOVBstoreconst { 14030 break 14031 } 14032 a := x.AuxInt 14033 if x.Aux != s { 14034 break 14035 } 14036 _ = x.Args[1] 14037 if p != x.Args[0] { 14038 break 14039 } 14040 mem := x.Args[1] 14041 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 14042 break 14043 } 14044 v.reset(OpAMD64MOVWstoreconst) 14045 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 14046 v.Aux = s 14047 v.AddArg(p) 14048 v.AddArg(mem) 14049 return true 14050 } 14051 // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) 14052 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == 
ValAndOff(c).Off() && clobber(x) 14053 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 14054 for { 14055 a := v.AuxInt 14056 s := v.Aux 14057 _ = v.Args[1] 14058 p := v.Args[0] 14059 x := v.Args[1] 14060 if x.Op != OpAMD64MOVBstoreconst { 14061 break 14062 } 14063 c := x.AuxInt 14064 if x.Aux != s { 14065 break 14066 } 14067 _ = x.Args[1] 14068 if p != x.Args[0] { 14069 break 14070 } 14071 mem := x.Args[1] 14072 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 14073 break 14074 } 14075 v.reset(OpAMD64MOVWstoreconst) 14076 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 14077 v.Aux = s 14078 v.AddArg(p) 14079 v.AddArg(mem) 14080 return true 14081 } 14082 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 14083 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 14084 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 14085 for { 14086 sc := v.AuxInt 14087 sym1 := v.Aux 14088 _ = v.Args[1] 14089 v_0 := v.Args[0] 14090 if v_0.Op != OpAMD64LEAL { 14091 break 14092 } 14093 off := v_0.AuxInt 14094 sym2 := v_0.Aux 14095 ptr := v_0.Args[0] 14096 mem := v.Args[1] 14097 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 14098 break 14099 } 14100 v.reset(OpAMD64MOVBstoreconst) 14101 v.AuxInt = ValAndOff(sc).add(off) 14102 v.Aux = mergeSym(sym1, sym2) 14103 v.AddArg(ptr) 14104 v.AddArg(mem) 14105 return true 14106 } 14107 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 14108 // cond: ValAndOff(sc).canAdd(off) 14109 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 14110 for { 14111 sc := v.AuxInt 14112 s := v.Aux 14113 _ = v.Args[1] 14114 v_0 := v.Args[0] 14115 if v_0.Op != OpAMD64ADDLconst { 14116 break 14117 } 14118 off := v_0.AuxInt 14119 ptr := v_0.Args[0] 14120 mem := v.Args[1] 14121 if !(ValAndOff(sc).canAdd(off)) { 14122 break 14123 } 14124 v.reset(OpAMD64MOVBstoreconst) 14125 v.AuxInt = ValAndOff(sc).add(off) 14126 v.Aux = s 14127 v.AddArg(ptr) 14128 v.AddArg(mem) 14129 return true 14130 } 14131 return false 14132 } 14133 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { 14134 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 14135 // cond: ValAndOff(x).canAdd(c) 14136 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 14137 for { 14138 x := v.AuxInt 14139 sym := v.Aux 14140 _ = v.Args[2] 14141 v_0 := v.Args[0] 14142 if v_0.Op != OpAMD64ADDQconst { 14143 break 14144 } 14145 c := v_0.AuxInt 14146 ptr := v_0.Args[0] 14147 idx := v.Args[1] 14148 mem := v.Args[2] 14149 if !(ValAndOff(x).canAdd(c)) { 14150 break 14151 } 14152 v.reset(OpAMD64MOVBstoreconstidx1) 14153 v.AuxInt = ValAndOff(x).add(c) 14154 v.Aux = sym 14155 v.AddArg(ptr) 14156 v.AddArg(idx) 14157 v.AddArg(mem) 14158 return true 14159 } 14160 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 14161 // cond: ValAndOff(x).canAdd(c) 14162 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 14163 for { 14164 x := v.AuxInt 14165 sym := v.Aux 14166 _ = v.Args[2] 14167 ptr := v.Args[0] 14168 v_1 := v.Args[1] 14169 if v_1.Op != OpAMD64ADDQconst { 14170 break 14171 } 14172 c := v_1.AuxInt 14173 idx := v_1.Args[0] 14174 mem := v.Args[2] 14175 if !(ValAndOff(x).canAdd(c)) { 14176 break 14177 } 14178 v.reset(OpAMD64MOVBstoreconstidx1) 14179 v.AuxInt = ValAndOff(x).add(c) 14180 v.Aux = sym 14181 v.AddArg(ptr) 
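// For the 1-byte indexed form the address is ptr+idx+off, so ptr and idx are
// symmetric addends: this rule and the one above fold an ADDQconst into the packed
// offset the same way whether the constant sits on the index or on the pointer.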
14182 v.AddArg(idx) 14183 v.AddArg(mem) 14184 return true 14185 } 14186 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 14187 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 14188 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 14189 for { 14190 c := v.AuxInt 14191 s := v.Aux 14192 _ = v.Args[2] 14193 p := v.Args[0] 14194 i := v.Args[1] 14195 x := v.Args[2] 14196 if x.Op != OpAMD64MOVBstoreconstidx1 { 14197 break 14198 } 14199 a := x.AuxInt 14200 if x.Aux != s { 14201 break 14202 } 14203 _ = x.Args[2] 14204 if p != x.Args[0] { 14205 break 14206 } 14207 if i != x.Args[1] { 14208 break 14209 } 14210 mem := x.Args[2] 14211 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 14212 break 14213 } 14214 v.reset(OpAMD64MOVWstoreconstidx1) 14215 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 14216 v.Aux = s 14217 v.AddArg(p) 14218 v.AddArg(i) 14219 v.AddArg(mem) 14220 return true 14221 } 14222 return false 14223 } 14224 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { 14225 b := v.Block 14226 _ = b 14227 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 14228 // cond: is32Bit(c+d) 14229 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 14230 for { 14231 c := v.AuxInt 14232 sym := v.Aux 14233 _ = v.Args[3] 14234 v_0 := v.Args[0] 14235 if v_0.Op != OpAMD64ADDQconst { 14236 break 14237 } 14238 d := v_0.AuxInt 14239 ptr := v_0.Args[0] 14240 idx := v.Args[1] 14241 val := v.Args[2] 14242 mem := v.Args[3] 14243 if !(is32Bit(c + d)) { 14244 break 14245 } 14246 v.reset(OpAMD64MOVBstoreidx1) 14247 v.AuxInt = c + d 14248 v.Aux = sym 14249 v.AddArg(ptr) 14250 v.AddArg(idx) 14251 v.AddArg(val) 14252 v.AddArg(mem) 14253 return true 14254 } 14255 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 14256 // cond: is32Bit(c+d) 14257 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 14258 for { 14259 c := v.AuxInt 14260 sym := v.Aux 14261 _ = v.Args[3] 14262 ptr := v.Args[0] 14263 v_1 := v.Args[1] 14264 if v_1.Op != OpAMD64ADDQconst { 14265 break 14266 } 14267 d := v_1.AuxInt 14268 idx := v_1.Args[0] 14269 val := v.Args[2] 14270 mem := v.Args[3] 14271 if !(is32Bit(c + d)) { 14272 break 14273 } 14274 v.reset(OpAMD64MOVBstoreidx1) 14275 v.AuxInt = c + d 14276 v.Aux = sym 14277 v.AddArg(ptr) 14278 v.AddArg(idx) 14279 v.AddArg(val) 14280 v.AddArg(mem) 14281 return true 14282 } 14283 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) 14284 // cond: x0.Uses == 1 && clobber(x0) 14285 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem) 14286 for { 14287 i := v.AuxInt 14288 s := v.Aux 14289 _ = v.Args[3] 14290 p := v.Args[0] 14291 idx := v.Args[1] 14292 w := v.Args[2] 14293 x0 := v.Args[3] 14294 if x0.Op != OpAMD64MOVBstoreidx1 { 14295 break 14296 } 14297 if x0.AuxInt != i-1 { 14298 break 14299 } 14300 if x0.Aux != s { 14301 break 14302 } 14303 _ = x0.Args[3] 14304 if p != x0.Args[0] { 14305 break 14306 } 14307 if idx != x0.Args[1] { 14308 break 14309 } 14310 x0_2 := x0.Args[2] 14311 if x0_2.Op != OpAMD64SHRWconst { 14312 break 14313 } 14314 if x0_2.AuxInt != 8 { 14315 break 14316 } 14317 if w != x0_2.Args[0] { 14318 break 14319 } 14320 mem := x0.Args[3] 14321 if !(x0.Uses == 1 && clobber(x0)) { 14322 break 14323 } 14324 v.reset(OpAMD64MOVWstoreidx1) 14325 v.AuxInt = i - 1 14326 
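// The merged two-byte store is addressed at the lower offset i-1, so it covers
// exactly the bytes previously written at i-1 and i.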
v.Aux = s 14327 v.AddArg(p) 14328 v.AddArg(idx) 14329 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 14330 v0.AuxInt = 8 14331 v0.AddArg(w) 14332 v.AddArg(v0) 14333 v.AddArg(mem) 14334 return true 14335 } 14336 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) 14337 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 14338 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem) 14339 for { 14340 i := v.AuxInt 14341 s := v.Aux 14342 _ = v.Args[3] 14343 p := v.Args[0] 14344 idx := v.Args[1] 14345 w := v.Args[2] 14346 x2 := v.Args[3] 14347 if x2.Op != OpAMD64MOVBstoreidx1 { 14348 break 14349 } 14350 if x2.AuxInt != i-1 { 14351 break 14352 } 14353 if x2.Aux != s { 14354 break 14355 } 14356 _ = x2.Args[3] 14357 if p != x2.Args[0] { 14358 break 14359 } 14360 if idx != x2.Args[1] { 14361 break 14362 } 14363 x2_2 := x2.Args[2] 14364 if x2_2.Op != OpAMD64SHRLconst { 14365 break 14366 } 14367 if x2_2.AuxInt != 8 { 14368 break 14369 } 14370 if w != x2_2.Args[0] { 14371 break 14372 } 14373 x1 := x2.Args[3] 14374 if x1.Op != OpAMD64MOVBstoreidx1 { 14375 break 14376 } 14377 if x1.AuxInt != i-2 { 14378 break 14379 } 14380 if x1.Aux != s { 14381 break 14382 } 14383 _ = x1.Args[3] 14384 if p != x1.Args[0] { 14385 break 14386 } 14387 if idx != x1.Args[1] { 14388 break 14389 } 14390 x1_2 := x1.Args[2] 14391 if x1_2.Op != OpAMD64SHRLconst { 14392 break 14393 } 14394 if x1_2.AuxInt != 16 { 14395 break 14396 } 14397 if w != x1_2.Args[0] { 14398 break 14399 } 14400 x0 := x1.Args[3] 14401 if x0.Op != OpAMD64MOVBstoreidx1 { 14402 break 14403 } 14404 if x0.AuxInt != i-3 { 14405 break 14406 } 14407 if x0.Aux != s { 14408 break 14409 } 14410 _ = x0.Args[3] 14411 if p != x0.Args[0] { 14412 break 14413 } 14414 if idx != x0.Args[1] { 14415 break 14416 } 14417 x0_2 := x0.Args[2] 14418 if x0_2.Op != OpAMD64SHRLconst { 14419 break 14420 } 14421 if x0_2.AuxInt != 24 { 14422 break 14423 } 14424 if w != x0_2.Args[0] { 14425 break 14426 } 14427 mem := x0.Args[3] 14428 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 14429 break 14430 } 14431 v.reset(OpAMD64MOVLstoreidx1) 14432 v.AuxInt = i - 3 14433 v.Aux = s 14434 v.AddArg(p) 14435 v.AddArg(idx) 14436 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 14437 v0.AddArg(w) 14438 v.AddArg(v0) 14439 v.AddArg(mem) 14440 return true 14441 } 14442 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) 14443 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 14444 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem) 14445 for { 14446 i := v.AuxInt 14447 s := v.Aux 14448 _ = v.Args[3] 14449 p := v.Args[0] 14450 idx := v.Args[1] 14451 w := v.Args[2] 14452 x6 := v.Args[3] 14453 if x6.Op != OpAMD64MOVBstoreidx1 { 14454 break 14455 } 14456 if x6.AuxInt != i-1 { 14457 break 14458 } 14459 
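// This is the indexed mirror of the eight-byte merge above: the same descending
// chain of single-byte stores of w>>8 ... w>>56, except that every link must also
// use the identical idx value, since the combined MOVQstoreidx1 keeps a single
// ptr+idx addressing pair.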
if x6.Aux != s { 14460 break 14461 } 14462 _ = x6.Args[3] 14463 if p != x6.Args[0] { 14464 break 14465 } 14466 if idx != x6.Args[1] { 14467 break 14468 } 14469 x6_2 := x6.Args[2] 14470 if x6_2.Op != OpAMD64SHRQconst { 14471 break 14472 } 14473 if x6_2.AuxInt != 8 { 14474 break 14475 } 14476 if w != x6_2.Args[0] { 14477 break 14478 } 14479 x5 := x6.Args[3] 14480 if x5.Op != OpAMD64MOVBstoreidx1 { 14481 break 14482 } 14483 if x5.AuxInt != i-2 { 14484 break 14485 } 14486 if x5.Aux != s { 14487 break 14488 } 14489 _ = x5.Args[3] 14490 if p != x5.Args[0] { 14491 break 14492 } 14493 if idx != x5.Args[1] { 14494 break 14495 } 14496 x5_2 := x5.Args[2] 14497 if x5_2.Op != OpAMD64SHRQconst { 14498 break 14499 } 14500 if x5_2.AuxInt != 16 { 14501 break 14502 } 14503 if w != x5_2.Args[0] { 14504 break 14505 } 14506 x4 := x5.Args[3] 14507 if x4.Op != OpAMD64MOVBstoreidx1 { 14508 break 14509 } 14510 if x4.AuxInt != i-3 { 14511 break 14512 } 14513 if x4.Aux != s { 14514 break 14515 } 14516 _ = x4.Args[3] 14517 if p != x4.Args[0] { 14518 break 14519 } 14520 if idx != x4.Args[1] { 14521 break 14522 } 14523 x4_2 := x4.Args[2] 14524 if x4_2.Op != OpAMD64SHRQconst { 14525 break 14526 } 14527 if x4_2.AuxInt != 24 { 14528 break 14529 } 14530 if w != x4_2.Args[0] { 14531 break 14532 } 14533 x3 := x4.Args[3] 14534 if x3.Op != OpAMD64MOVBstoreidx1 { 14535 break 14536 } 14537 if x3.AuxInt != i-4 { 14538 break 14539 } 14540 if x3.Aux != s { 14541 break 14542 } 14543 _ = x3.Args[3] 14544 if p != x3.Args[0] { 14545 break 14546 } 14547 if idx != x3.Args[1] { 14548 break 14549 } 14550 x3_2 := x3.Args[2] 14551 if x3_2.Op != OpAMD64SHRQconst { 14552 break 14553 } 14554 if x3_2.AuxInt != 32 { 14555 break 14556 } 14557 if w != x3_2.Args[0] { 14558 break 14559 } 14560 x2 := x3.Args[3] 14561 if x2.Op != OpAMD64MOVBstoreidx1 { 14562 break 14563 } 14564 if x2.AuxInt != i-5 { 14565 break 14566 } 14567 if x2.Aux != s { 14568 break 14569 } 14570 _ = x2.Args[3] 14571 if p != x2.Args[0] { 14572 break 14573 } 14574 if idx != x2.Args[1] { 14575 break 14576 } 14577 x2_2 := x2.Args[2] 14578 if x2_2.Op != OpAMD64SHRQconst { 14579 break 14580 } 14581 if x2_2.AuxInt != 40 { 14582 break 14583 } 14584 if w != x2_2.Args[0] { 14585 break 14586 } 14587 x1 := x2.Args[3] 14588 if x1.Op != OpAMD64MOVBstoreidx1 { 14589 break 14590 } 14591 if x1.AuxInt != i-6 { 14592 break 14593 } 14594 if x1.Aux != s { 14595 break 14596 } 14597 _ = x1.Args[3] 14598 if p != x1.Args[0] { 14599 break 14600 } 14601 if idx != x1.Args[1] { 14602 break 14603 } 14604 x1_2 := x1.Args[2] 14605 if x1_2.Op != OpAMD64SHRQconst { 14606 break 14607 } 14608 if x1_2.AuxInt != 48 { 14609 break 14610 } 14611 if w != x1_2.Args[0] { 14612 break 14613 } 14614 x0 := x1.Args[3] 14615 if x0.Op != OpAMD64MOVBstoreidx1 { 14616 break 14617 } 14618 if x0.AuxInt != i-7 { 14619 break 14620 } 14621 if x0.Aux != s { 14622 break 14623 } 14624 _ = x0.Args[3] 14625 if p != x0.Args[0] { 14626 break 14627 } 14628 if idx != x0.Args[1] { 14629 break 14630 } 14631 x0_2 := x0.Args[2] 14632 if x0_2.Op != OpAMD64SHRQconst { 14633 break 14634 } 14635 if x0_2.AuxInt != 56 { 14636 break 14637 } 14638 if w != x0_2.Args[0] { 14639 break 14640 } 14641 mem := x0.Args[3] 14642 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 14643 break 14644 } 14645 v.reset(OpAMD64MOVQstoreidx1) 14646 v.AuxInt = i - 7 14647 v.Aux = s 14648 v.AddArg(p) 14649 
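// A concrete check of the byte layout: with w = 0x1122334455667788 the matched
// chain writes 0x88 at i, 0x77 at i-1, ..., 0x11 at i-7, i.e. the bytes of w in
// big-endian order starting at i-7. Storing BSWAPQ(w) = 0x8877665544332211 with a
// little-endian MOVQ at i-7 reproduces exactly that layout.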
v.AddArg(idx) 14650 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 14651 v0.AddArg(w) 14652 v.AddArg(v0) 14653 v.AddArg(mem) 14654 return true 14655 } 14656 // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 14657 // cond: x.Uses == 1 && clobber(x) 14658 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 14659 for { 14660 i := v.AuxInt 14661 s := v.Aux 14662 _ = v.Args[3] 14663 p := v.Args[0] 14664 idx := v.Args[1] 14665 v_2 := v.Args[2] 14666 if v_2.Op != OpAMD64SHRWconst { 14667 break 14668 } 14669 if v_2.AuxInt != 8 { 14670 break 14671 } 14672 w := v_2.Args[0] 14673 x := v.Args[3] 14674 if x.Op != OpAMD64MOVBstoreidx1 { 14675 break 14676 } 14677 if x.AuxInt != i-1 { 14678 break 14679 } 14680 if x.Aux != s { 14681 break 14682 } 14683 _ = x.Args[3] 14684 if p != x.Args[0] { 14685 break 14686 } 14687 if idx != x.Args[1] { 14688 break 14689 } 14690 if w != x.Args[2] { 14691 break 14692 } 14693 mem := x.Args[3] 14694 if !(x.Uses == 1 && clobber(x)) { 14695 break 14696 } 14697 v.reset(OpAMD64MOVWstoreidx1) 14698 v.AuxInt = i - 1 14699 v.Aux = s 14700 v.AddArg(p) 14701 v.AddArg(idx) 14702 v.AddArg(w) 14703 v.AddArg(mem) 14704 return true 14705 } 14706 // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 14707 // cond: x.Uses == 1 && clobber(x) 14708 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 14709 for { 14710 i := v.AuxInt 14711 s := v.Aux 14712 _ = v.Args[3] 14713 p := v.Args[0] 14714 idx := v.Args[1] 14715 v_2 := v.Args[2] 14716 if v_2.Op != OpAMD64SHRLconst { 14717 break 14718 } 14719 if v_2.AuxInt != 8 { 14720 break 14721 } 14722 w := v_2.Args[0] 14723 x := v.Args[3] 14724 if x.Op != OpAMD64MOVBstoreidx1 { 14725 break 14726 } 14727 if x.AuxInt != i-1 { 14728 break 14729 } 14730 if x.Aux != s { 14731 break 14732 } 14733 _ = x.Args[3] 14734 if p != x.Args[0] { 14735 break 14736 } 14737 if idx != x.Args[1] { 14738 break 14739 } 14740 if w != x.Args[2] { 14741 break 14742 } 14743 mem := x.Args[3] 14744 if !(x.Uses == 1 && clobber(x)) { 14745 break 14746 } 14747 v.reset(OpAMD64MOVWstoreidx1) 14748 v.AuxInt = i - 1 14749 v.Aux = s 14750 v.AddArg(p) 14751 v.AddArg(idx) 14752 v.AddArg(w) 14753 v.AddArg(mem) 14754 return true 14755 } 14756 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 14757 // cond: x.Uses == 1 && clobber(x) 14758 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 14759 for { 14760 i := v.AuxInt 14761 s := v.Aux 14762 _ = v.Args[3] 14763 p := v.Args[0] 14764 idx := v.Args[1] 14765 v_2 := v.Args[2] 14766 if v_2.Op != OpAMD64SHRQconst { 14767 break 14768 } 14769 if v_2.AuxInt != 8 { 14770 break 14771 } 14772 w := v_2.Args[0] 14773 x := v.Args[3] 14774 if x.Op != OpAMD64MOVBstoreidx1 { 14775 break 14776 } 14777 if x.AuxInt != i-1 { 14778 break 14779 } 14780 if x.Aux != s { 14781 break 14782 } 14783 _ = x.Args[3] 14784 if p != x.Args[0] { 14785 break 14786 } 14787 if idx != x.Args[1] { 14788 break 14789 } 14790 if w != x.Args[2] { 14791 break 14792 } 14793 mem := x.Args[3] 14794 if !(x.Uses == 1 && clobber(x)) { 14795 break 14796 } 14797 v.reset(OpAMD64MOVWstoreidx1) 14798 v.AuxInt = i - 1 14799 v.Aux = s 14800 v.AddArg(p) 14801 v.AddArg(idx) 14802 v.AddArg(w) 14803 v.AddArg(mem) 14804 return true 14805 } 14806 // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) 14807 // cond: x.Uses == 1 && clobber(x) 14808 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 14809 for { 14810 
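// This generalizes the fixed shift-by-8 cases above: a store of byte j/8 of w at
// offset i next to a store of byte (j-8)/8 at i-1 is a 16-bit store of w>>(j-8).
// For j = 24, the bytes (w>>24)&0xff at i and (w>>16)&0xff at i-1 become one
// MOVWstoreidx1 of w0 = w>>16 at i-1, whose low and high bytes land at i-1 and i.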
i := v.AuxInt 14811 s := v.Aux 14812 _ = v.Args[3] 14813 p := v.Args[0] 14814 idx := v.Args[1] 14815 v_2 := v.Args[2] 14816 if v_2.Op != OpAMD64SHRLconst { 14817 break 14818 } 14819 j := v_2.AuxInt 14820 w := v_2.Args[0] 14821 x := v.Args[3] 14822 if x.Op != OpAMD64MOVBstoreidx1 { 14823 break 14824 } 14825 if x.AuxInt != i-1 { 14826 break 14827 } 14828 if x.Aux != s { 14829 break 14830 } 14831 _ = x.Args[3] 14832 if p != x.Args[0] { 14833 break 14834 } 14835 if idx != x.Args[1] { 14836 break 14837 } 14838 w0 := x.Args[2] 14839 if w0.Op != OpAMD64SHRLconst { 14840 break 14841 } 14842 if w0.AuxInt != j-8 { 14843 break 14844 } 14845 if w != w0.Args[0] { 14846 break 14847 } 14848 mem := x.Args[3] 14849 if !(x.Uses == 1 && clobber(x)) { 14850 break 14851 } 14852 v.reset(OpAMD64MOVWstoreidx1) 14853 v.AuxInt = i - 1 14854 v.Aux = s 14855 v.AddArg(p) 14856 v.AddArg(idx) 14857 v.AddArg(w0) 14858 v.AddArg(mem) 14859 return true 14860 } 14861 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 14862 // cond: x.Uses == 1 && clobber(x) 14863 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 14864 for { 14865 i := v.AuxInt 14866 s := v.Aux 14867 _ = v.Args[3] 14868 p := v.Args[0] 14869 idx := v.Args[1] 14870 v_2 := v.Args[2] 14871 if v_2.Op != OpAMD64SHRQconst { 14872 break 14873 } 14874 j := v_2.AuxInt 14875 w := v_2.Args[0] 14876 x := v.Args[3] 14877 if x.Op != OpAMD64MOVBstoreidx1 { 14878 break 14879 } 14880 if x.AuxInt != i-1 { 14881 break 14882 } 14883 if x.Aux != s { 14884 break 14885 } 14886 _ = x.Args[3] 14887 if p != x.Args[0] { 14888 break 14889 } 14890 if idx != x.Args[1] { 14891 break 14892 } 14893 w0 := x.Args[2] 14894 if w0.Op != OpAMD64SHRQconst { 14895 break 14896 } 14897 if w0.AuxInt != j-8 { 14898 break 14899 } 14900 if w != w0.Args[0] { 14901 break 14902 } 14903 mem := x.Args[3] 14904 if !(x.Uses == 1 && clobber(x)) { 14905 break 14906 } 14907 v.reset(OpAMD64MOVWstoreidx1) 14908 v.AuxInt = i - 1 14909 v.Aux = s 14910 v.AddArg(p) 14911 v.AddArg(idx) 14912 v.AddArg(w0) 14913 v.AddArg(mem) 14914 return true 14915 } 14916 return false 14917 } 14918 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v *Value) bool { 14919 // match: (MOVBstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 14920 // cond: is32Bit(i+c) 14921 // result: (MOVBstore [i+c] {s} p w mem) 14922 for { 14923 i := v.AuxInt 14924 s := v.Aux 14925 _ = v.Args[3] 14926 p := v.Args[0] 14927 v_1 := v.Args[1] 14928 if v_1.Op != OpAMD64MOVQconst { 14929 break 14930 } 14931 c := v_1.AuxInt 14932 w := v.Args[2] 14933 mem := v.Args[3] 14934 if !(is32Bit(i + c)) { 14935 break 14936 } 14937 v.reset(OpAMD64MOVBstore) 14938 v.AuxInt = i + c 14939 v.Aux = s 14940 v.AddArg(p) 14941 v.AddArg(w) 14942 v.AddArg(mem) 14943 return true 14944 } 14945 return false 14946 } 14947 func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { 14948 b := v.Block 14949 _ = b 14950 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 14951 // cond: x.Uses == 1 && clobber(x) 14952 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 14953 for { 14954 x := v.Args[0] 14955 if x.Op != OpAMD64MOVLload { 14956 break 14957 } 14958 off := x.AuxInt 14959 sym := x.Aux 14960 _ = x.Args[1] 14961 ptr := x.Args[0] 14962 mem := x.Args[1] 14963 if !(x.Uses == 1 && clobber(x)) { 14964 break 14965 } 14966 b = x.Block 14967 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 14968 v.reset(OpCopy) 14969 v.AddArg(v0) 14970 v0.AuxInt = off 14971 v0.Aux = sym 14972 v0.AddArg(ptr) 14973 v0.AddArg(mem) 14974 return 
true 14975 } 14976 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 14977 // cond: x.Uses == 1 && clobber(x) 14978 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 14979 for { 14980 x := v.Args[0] 14981 if x.Op != OpAMD64MOVQload { 14982 break 14983 } 14984 off := x.AuxInt 14985 sym := x.Aux 14986 _ = x.Args[1] 14987 ptr := x.Args[0] 14988 mem := x.Args[1] 14989 if !(x.Uses == 1 && clobber(x)) { 14990 break 14991 } 14992 b = x.Block 14993 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 14994 v.reset(OpCopy) 14995 v.AddArg(v0) 14996 v0.AuxInt = off 14997 v0.Aux = sym 14998 v0.AddArg(ptr) 14999 v0.AddArg(mem) 15000 return true 15001 } 15002 // match: (MOVLQSX (ANDLconst [c] x)) 15003 // cond: c & 0x80000000 == 0 15004 // result: (ANDLconst [c & 0x7fffffff] x) 15005 for { 15006 v_0 := v.Args[0] 15007 if v_0.Op != OpAMD64ANDLconst { 15008 break 15009 } 15010 c := v_0.AuxInt 15011 x := v_0.Args[0] 15012 if !(c&0x80000000 == 0) { 15013 break 15014 } 15015 v.reset(OpAMD64ANDLconst) 15016 v.AuxInt = c & 0x7fffffff 15017 v.AddArg(x) 15018 return true 15019 } 15020 // match: (MOVLQSX (MOVLQSX x)) 15021 // cond: 15022 // result: (MOVLQSX x) 15023 for { 15024 v_0 := v.Args[0] 15025 if v_0.Op != OpAMD64MOVLQSX { 15026 break 15027 } 15028 x := v_0.Args[0] 15029 v.reset(OpAMD64MOVLQSX) 15030 v.AddArg(x) 15031 return true 15032 } 15033 // match: (MOVLQSX (MOVWQSX x)) 15034 // cond: 15035 // result: (MOVWQSX x) 15036 for { 15037 v_0 := v.Args[0] 15038 if v_0.Op != OpAMD64MOVWQSX { 15039 break 15040 } 15041 x := v_0.Args[0] 15042 v.reset(OpAMD64MOVWQSX) 15043 v.AddArg(x) 15044 return true 15045 } 15046 // match: (MOVLQSX (MOVBQSX x)) 15047 // cond: 15048 // result: (MOVBQSX x) 15049 for { 15050 v_0 := v.Args[0] 15051 if v_0.Op != OpAMD64MOVBQSX { 15052 break 15053 } 15054 x := v_0.Args[0] 15055 v.reset(OpAMD64MOVBQSX) 15056 v.AddArg(x) 15057 return true 15058 } 15059 return false 15060 } 15061 func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { 15062 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 15063 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 15064 // result: (MOVLQSX x) 15065 for { 15066 off := v.AuxInt 15067 sym := v.Aux 15068 _ = v.Args[1] 15069 ptr := v.Args[0] 15070 v_1 := v.Args[1] 15071 if v_1.Op != OpAMD64MOVLstore { 15072 break 15073 } 15074 off2 := v_1.AuxInt 15075 sym2 := v_1.Aux 15076 _ = v_1.Args[2] 15077 ptr2 := v_1.Args[0] 15078 x := v_1.Args[1] 15079 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 15080 break 15081 } 15082 v.reset(OpAMD64MOVLQSX) 15083 v.AddArg(x) 15084 return true 15085 } 15086 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 15087 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 15088 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 15089 for { 15090 off1 := v.AuxInt 15091 sym1 := v.Aux 15092 _ = v.Args[1] 15093 v_0 := v.Args[0] 15094 if v_0.Op != OpAMD64LEAQ { 15095 break 15096 } 15097 off2 := v_0.AuxInt 15098 sym2 := v_0.Aux 15099 base := v_0.Args[0] 15100 mem := v.Args[1] 15101 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 15102 break 15103 } 15104 v.reset(OpAMD64MOVLQSXload) 15105 v.AuxInt = off1 + off2 15106 v.Aux = mergeSym(sym1, sym2) 15107 v.AddArg(base) 15108 v.AddArg(mem) 15109 return true 15110 } 15111 return false 15112 } 15113 func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { 15114 b := v.Block 15115 _ = b 15116 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 15117 // cond: x.Uses == 1 && clobber(x) 15118 // 
result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 15119 for { 15120 x := v.Args[0] 15121 if x.Op != OpAMD64MOVLload { 15122 break 15123 } 15124 off := x.AuxInt 15125 sym := x.Aux 15126 _ = x.Args[1] 15127 ptr := x.Args[0] 15128 mem := x.Args[1] 15129 if !(x.Uses == 1 && clobber(x)) { 15130 break 15131 } 15132 b = x.Block 15133 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 15134 v.reset(OpCopy) 15135 v.AddArg(v0) 15136 v0.AuxInt = off 15137 v0.Aux = sym 15138 v0.AddArg(ptr) 15139 v0.AddArg(mem) 15140 return true 15141 } 15142 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 15143 // cond: x.Uses == 1 && clobber(x) 15144 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 15145 for { 15146 x := v.Args[0] 15147 if x.Op != OpAMD64MOVQload { 15148 break 15149 } 15150 off := x.AuxInt 15151 sym := x.Aux 15152 _ = x.Args[1] 15153 ptr := x.Args[0] 15154 mem := x.Args[1] 15155 if !(x.Uses == 1 && clobber(x)) { 15156 break 15157 } 15158 b = x.Block 15159 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 15160 v.reset(OpCopy) 15161 v.AddArg(v0) 15162 v0.AuxInt = off 15163 v0.Aux = sym 15164 v0.AddArg(ptr) 15165 v0.AddArg(mem) 15166 return true 15167 } 15168 // match: (MOVLQZX x) 15169 // cond: zeroUpper32Bits(x,3) 15170 // result: x 15171 for { 15172 x := v.Args[0] 15173 if !(zeroUpper32Bits(x, 3)) { 15174 break 15175 } 15176 v.reset(OpCopy) 15177 v.Type = x.Type 15178 v.AddArg(x) 15179 return true 15180 } 15181 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 15182 // cond: x.Uses == 1 && clobber(x) 15183 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 15184 for { 15185 x := v.Args[0] 15186 if x.Op != OpAMD64MOVLloadidx1 { 15187 break 15188 } 15189 off := x.AuxInt 15190 sym := x.Aux 15191 _ = x.Args[2] 15192 ptr := x.Args[0] 15193 idx := x.Args[1] 15194 mem := x.Args[2] 15195 if !(x.Uses == 1 && clobber(x)) { 15196 break 15197 } 15198 b = x.Block 15199 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 15200 v.reset(OpCopy) 15201 v.AddArg(v0) 15202 v0.AuxInt = off 15203 v0.Aux = sym 15204 v0.AddArg(ptr) 15205 v0.AddArg(idx) 15206 v0.AddArg(mem) 15207 return true 15208 } 15209 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 15210 // cond: x.Uses == 1 && clobber(x) 15211 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 15212 for { 15213 x := v.Args[0] 15214 if x.Op != OpAMD64MOVLloadidx4 { 15215 break 15216 } 15217 off := x.AuxInt 15218 sym := x.Aux 15219 _ = x.Args[2] 15220 ptr := x.Args[0] 15221 idx := x.Args[1] 15222 mem := x.Args[2] 15223 if !(x.Uses == 1 && clobber(x)) { 15224 break 15225 } 15226 b = x.Block 15227 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) 15228 v.reset(OpCopy) 15229 v.AddArg(v0) 15230 v0.AuxInt = off 15231 v0.Aux = sym 15232 v0.AddArg(ptr) 15233 v0.AddArg(idx) 15234 v0.AddArg(mem) 15235 return true 15236 } 15237 // match: (MOVLQZX (ANDLconst [c] x)) 15238 // cond: 15239 // result: (ANDLconst [c] x) 15240 for { 15241 v_0 := v.Args[0] 15242 if v_0.Op != OpAMD64ANDLconst { 15243 break 15244 } 15245 c := v_0.AuxInt 15246 x := v_0.Args[0] 15247 v.reset(OpAMD64ANDLconst) 15248 v.AuxInt = c 15249 v.AddArg(x) 15250 return true 15251 } 15252 // match: (MOVLQZX (MOVLQZX x)) 15253 // cond: 15254 // result: (MOVLQZX x) 15255 for { 15256 v_0 := v.Args[0] 15257 if v_0.Op != OpAMD64MOVLQZX { 15258 break 15259 } 15260 x := v_0.Args[0] 15261 v.reset(OpAMD64MOVLQZX) 15262 v.AddArg(x) 15263 return true 15264 } 15265 // match: (MOVLQZX (MOVWQZX x)) 15266 // cond: 15267 // result: (MOVWQZX x) 15268 
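// Nested zero-extensions collapse to the narrowest one: MOVWQZX already clears
// bits 16-63, so a following MOVLQZX has nothing left to do. The same reasoning
// gives the MOVLQZX(MOVLQZX x) case above and the MOVBQZX case below.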
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
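// The MOVLload rules below do store-to-load forwarding and addressing-mode
// folding. A load that reads back a just-stored value at the same address
// becomes a zero-extension of the stored register:
//   (MOVLload [off] {sym} ptr (MOVLstore [off] {sym} ptr x _)) => (MOVLQZX x)
// and constant offsets are folded into the displacement (illustrative
// offsets) when the sum still fits in a signed 32-bit field:
//   (MOVLload [8] {sym} (ADDQconst [4] ptr) mem) => (MOVLload [12] {sym} ptr mem)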
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLf2i val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, off, config.BigEndian))])
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpSB {
			break
		}
		if !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(read32(sym, off, config.BigEndian))
		return true
	}
	return false
}
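// The indexed-load rules that follow canonicalize a shifted index into a
// scaled addressing mode: a left shift by 2 multiplies the index by 4, so
//   (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) => (MOVLloadidx4 [c] {sym} ptr idx mem)
// letting the assembler emit the base+4*index form directly; a shift by 3
// similarly selects the 8-scaled variant.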
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVLload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [i] {s} (MOVQconst [c]) p mem)
	// cond: is32Bit(i+c)
	// result: (MOVLload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		p := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
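	// Note that when a constant is folded out of the index operand of a
	// scaled access, the displacement must grow by scale*constant rather
	// than the constant itself: bumping idx by d moves the effective
	// address ptr+4*idx by 4*d. Hence the c+4*d arithmetic here (and
	// c+8*d in the idx8 rules below), each guarded by an is32Bit check.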
	// match: (MOVLloadidx4 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+4*c)
	// result: (MOVLload [i+4*c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + 4*c)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = i + 4*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool {
	// match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx8 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVLload [i+8*c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + 8*c)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = i + 8*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
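// rewriteValueAMD64_OpAMD64MOVLstore_0 and its _10/_20/_30 continuations
// optimize 32-bit stores. The first two rules discard a sign or zero
// extension feeding the store, since a 32-bit store writes only the low
// 32 bits of the source register anyway:
//   (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem)
// Later rules fold constant sources into MOVLstoreconst and merge address
// arithmetic into the store's addressing mode.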
func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
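// rewriteValueAMD64_OpAMD64MOVLstore_10 begins with store-combining rules:
// storing the low half of w at [i-4] and its high 32 bits (SHRQconst [32])
// at [i] writes the same eight bytes as a single
//   (MOVQstore [i-4] {s} p w mem)
// so the pair is collapsed whenever the older store has a single use and can
// be clobbered. The same idea turns two adjacent load/store pairs of a copy
// into one MOVQload/MOVQstore.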
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVLstore {
			break
		}
		if mem2.AuxInt != i-4 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload {
			break
		}
		if x2.AuxInt != j-4 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = j - 4
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDLload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDLload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORLload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORLload {
			break
		}
		if y.AuxInt != off {
			break
		}
		if y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] {
			break
		}
		mem := y.Args[2]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
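	// The remaining rules in this function and the next fuse a
	// load-op-store sequence on one address into a single memory-operand
	// instruction: if y computes an ALU op between x and a load from
	// [off]{sym}(ptr), and the result is stored straight back to the same
	// location over the same memory state, the pattern becomes one
	// read-modify-write op (roughly ADDL reg, off(ptr) for ADDLmodify).
	// The y.Uses==1 / l.Uses==1 conditions ensure nothing else consumes
	// the intermediate values before they are clobbered.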
	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
	// match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (SUBLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SUBL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTCLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTCL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTRLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTRL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
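// rewriteValueAMD64_OpAMD64MOVLstore_30 finishes the bit-test fusions
// (BTSLmodify) and then handles the constant variants: when the folded
// operand is itself a constant, the value and the store offset are packed
// into a single ValAndOff auxiliary via makeValAndOff(c, off), with
// validValAndOff checking that both halves fit, yielding immediate-to-memory
// forms such as ADDLconstmodify and BTSLconstmodify.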
func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
	// match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTSLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTSL {
			break
		}
		_ = y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] {
			break
		}
		mem := l.Args[1]
		x := y.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ORLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64XORLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTCLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTRLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTRLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTSLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTSLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// cond:
	// result: (MOVSSstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
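// For MOVLstoreconst the AuxInt is already a packed ValAndOff, so offset
// folding must go through ValAndOff(sc).canAdd(off) and ValAndOff(sc).add(off)
// instead of plain addition; the value half is preserved and only the offset
// half changes. Two of the rules below also pair adjacent 4-byte constant
// stores into a single MOVQstore whose 64-bit immediate is assembled as
// ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32.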
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		a := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		c := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
17744 } 17745 mem := x.Args[1] 17746 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 17747 break 17748 } 17749 v.reset(OpAMD64MOVQstore) 17750 v.AuxInt = ValAndOff(a).Off() 17751 v.Aux = s 17752 v.AddArg(p) 17753 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 17754 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 17755 v.AddArg(v0) 17756 v.AddArg(mem) 17757 return true 17758 } 17759 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 17760 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 17761 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 17762 for { 17763 sc := v.AuxInt 17764 sym1 := v.Aux 17765 _ = v.Args[1] 17766 v_0 := v.Args[0] 17767 if v_0.Op != OpAMD64LEAL { 17768 break 17769 } 17770 off := v_0.AuxInt 17771 sym2 := v_0.Aux 17772 ptr := v_0.Args[0] 17773 mem := v.Args[1] 17774 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 17775 break 17776 } 17777 v.reset(OpAMD64MOVLstoreconst) 17778 v.AuxInt = ValAndOff(sc).add(off) 17779 v.Aux = mergeSym(sym1, sym2) 17780 v.AddArg(ptr) 17781 v.AddArg(mem) 17782 return true 17783 } 17784 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 17785 // cond: ValAndOff(sc).canAdd(off) 17786 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 17787 for { 17788 sc := v.AuxInt 17789 s := v.Aux 17790 _ = v.Args[1] 17791 v_0 := v.Args[0] 17792 if v_0.Op != OpAMD64ADDLconst { 17793 break 17794 } 17795 off := v_0.AuxInt 17796 ptr := v_0.Args[0] 17797 mem := v.Args[1] 17798 if !(ValAndOff(sc).canAdd(off)) { 17799 break 17800 } 17801 v.reset(OpAMD64MOVLstoreconst) 17802 v.AuxInt = ValAndOff(sc).add(off) 17803 v.Aux = s 17804 v.AddArg(ptr) 17805 v.AddArg(mem) 17806 return true 17807 } 17808 return false 17809 } 17810 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { 17811 b := v.Block 17812 _ = b 17813 typ := &b.Func.Config.Types 17814 _ = typ 17815 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 17816 // cond: 17817 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 17818 for { 17819 c := v.AuxInt 17820 sym := v.Aux 17821 _ = v.Args[2] 17822 ptr := v.Args[0] 17823 v_1 := v.Args[1] 17824 if v_1.Op != OpAMD64SHLQconst { 17825 break 17826 } 17827 if v_1.AuxInt != 2 { 17828 break 17829 } 17830 idx := v_1.Args[0] 17831 mem := v.Args[2] 17832 v.reset(OpAMD64MOVLstoreconstidx4) 17833 v.AuxInt = c 17834 v.Aux = sym 17835 v.AddArg(ptr) 17836 v.AddArg(idx) 17837 v.AddArg(mem) 17838 return true 17839 } 17840 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 17841 // cond: ValAndOff(x).canAdd(c) 17842 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 17843 for { 17844 x := v.AuxInt 17845 sym := v.Aux 17846 _ = v.Args[2] 17847 v_0 := v.Args[0] 17848 if v_0.Op != OpAMD64ADDQconst { 17849 break 17850 } 17851 c := v_0.AuxInt 17852 ptr := v_0.Args[0] 17853 idx := v.Args[1] 17854 mem := v.Args[2] 17855 if !(ValAndOff(x).canAdd(c)) { 17856 break 17857 } 17858 v.reset(OpAMD64MOVLstoreconstidx1) 17859 v.AuxInt = ValAndOff(x).add(c) 17860 v.Aux = sym 17861 v.AddArg(ptr) 17862 v.AddArg(idx) 17863 v.AddArg(mem) 17864 return true 17865 } 17866 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 17867 // cond: ValAndOff(x).canAdd(c) 17868 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 17869 for { 17870 x := v.AuxInt 17871 sym := v.Aux 17872 _ = v.Args[2] 17873 ptr := v.Args[0] 17874 v_1 := v.Args[1] 17875 if 
v_1.Op != OpAMD64ADDQconst { 17876 break 17877 } 17878 c := v_1.AuxInt 17879 idx := v_1.Args[0] 17880 mem := v.Args[2] 17881 if !(ValAndOff(x).canAdd(c)) { 17882 break 17883 } 17884 v.reset(OpAMD64MOVLstoreconstidx1) 17885 v.AuxInt = ValAndOff(x).add(c) 17886 v.Aux = sym 17887 v.AddArg(ptr) 17888 v.AddArg(idx) 17889 v.AddArg(mem) 17890 return true 17891 } 17892 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 17893 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 17894 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 17895 for { 17896 c := v.AuxInt 17897 s := v.Aux 17898 _ = v.Args[2] 17899 p := v.Args[0] 17900 i := v.Args[1] 17901 x := v.Args[2] 17902 if x.Op != OpAMD64MOVLstoreconstidx1 { 17903 break 17904 } 17905 a := x.AuxInt 17906 if x.Aux != s { 17907 break 17908 } 17909 _ = x.Args[2] 17910 if p != x.Args[0] { 17911 break 17912 } 17913 if i != x.Args[1] { 17914 break 17915 } 17916 mem := x.Args[2] 17917 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 17918 break 17919 } 17920 v.reset(OpAMD64MOVQstoreidx1) 17921 v.AuxInt = ValAndOff(a).Off() 17922 v.Aux = s 17923 v.AddArg(p) 17924 v.AddArg(i) 17925 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 17926 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 17927 v.AddArg(v0) 17928 v.AddArg(mem) 17929 return true 17930 } 17931 return false 17932 } 17933 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { 17934 b := v.Block 17935 _ = b 17936 typ := &b.Func.Config.Types 17937 _ = typ 17938 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 17939 // cond: ValAndOff(x).canAdd(c) 17940 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 17941 for { 17942 x := v.AuxInt 17943 sym := v.Aux 17944 _ = v.Args[2] 17945 v_0 := v.Args[0] 17946 if v_0.Op != OpAMD64ADDQconst { 17947 break 17948 } 17949 c := v_0.AuxInt 17950 ptr := v_0.Args[0] 17951 idx := v.Args[1] 17952 mem := v.Args[2] 17953 if !(ValAndOff(x).canAdd(c)) { 17954 break 17955 } 17956 v.reset(OpAMD64MOVLstoreconstidx4) 17957 v.AuxInt = ValAndOff(x).add(c) 17958 v.Aux = sym 17959 v.AddArg(ptr) 17960 v.AddArg(idx) 17961 v.AddArg(mem) 17962 return true 17963 } 17964 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 17965 // cond: ValAndOff(x).canAdd(4*c) 17966 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 17967 for { 17968 x := v.AuxInt 17969 sym := v.Aux 17970 _ = v.Args[2] 17971 ptr := v.Args[0] 17972 v_1 := v.Args[1] 17973 if v_1.Op != OpAMD64ADDQconst { 17974 break 17975 } 17976 c := v_1.AuxInt 17977 idx := v_1.Args[0] 17978 mem := v.Args[2] 17979 if !(ValAndOff(x).canAdd(4 * c)) { 17980 break 17981 } 17982 v.reset(OpAMD64MOVLstoreconstidx4) 17983 v.AuxInt = ValAndOff(x).add(4 * c) 17984 v.Aux = sym 17985 v.AddArg(ptr) 17986 v.AddArg(idx) 17987 v.AddArg(mem) 17988 return true 17989 } 17990 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 17991 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 17992 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 17993 for { 17994 c := v.AuxInt 17995 s := v.Aux 17996 _ = v.Args[2] 17997 p := v.Args[0] 17998 i := v.Args[1] 17999 x := v.Args[2] 18000 if x.Op != OpAMD64MOVLstoreconstidx4 { 18001 
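// When two MOVLstoreconstidx4 stores merge, the result is expressed with
// MOVQstoreidx1 rather than a 4-scaled variant: the op set here provides
// 1- and 8-scaled indexed 8-byte stores but apparently no 4-scaled one, so
// the rewrite rescales the index explicitly with (SHLQconst <i.Type> [2] i),
// multiplying it by 4 before feeding the unscaled addressing mode.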
break 18002 } 18003 a := x.AuxInt 18004 if x.Aux != s { 18005 break 18006 } 18007 _ = x.Args[2] 18008 if p != x.Args[0] { 18009 break 18010 } 18011 if i != x.Args[1] { 18012 break 18013 } 18014 mem := x.Args[2] 18015 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 18016 break 18017 } 18018 v.reset(OpAMD64MOVQstoreidx1) 18019 v.AuxInt = ValAndOff(a).Off() 18020 v.Aux = s 18021 v.AddArg(p) 18022 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 18023 v0.AuxInt = 2 18024 v0.AddArg(i) 18025 v.AddArg(v0) 18026 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 18027 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 18028 v.AddArg(v1) 18029 v.AddArg(mem) 18030 return true 18031 } 18032 return false 18033 } 18034 func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { 18035 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 18036 // cond: 18037 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 18038 for { 18039 c := v.AuxInt 18040 sym := v.Aux 18041 _ = v.Args[3] 18042 ptr := v.Args[0] 18043 v_1 := v.Args[1] 18044 if v_1.Op != OpAMD64SHLQconst { 18045 break 18046 } 18047 if v_1.AuxInt != 2 { 18048 break 18049 } 18050 idx := v_1.Args[0] 18051 val := v.Args[2] 18052 mem := v.Args[3] 18053 v.reset(OpAMD64MOVLstoreidx4) 18054 v.AuxInt = c 18055 v.Aux = sym 18056 v.AddArg(ptr) 18057 v.AddArg(idx) 18058 v.AddArg(val) 18059 v.AddArg(mem) 18060 return true 18061 } 18062 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 18063 // cond: 18064 // result: (MOVLstoreidx8 [c] {sym} ptr idx val mem) 18065 for { 18066 c := v.AuxInt 18067 sym := v.Aux 18068 _ = v.Args[3] 18069 ptr := v.Args[0] 18070 v_1 := v.Args[1] 18071 if v_1.Op != OpAMD64SHLQconst { 18072 break 18073 } 18074 if v_1.AuxInt != 3 { 18075 break 18076 } 18077 idx := v_1.Args[0] 18078 val := v.Args[2] 18079 mem := v.Args[3] 18080 v.reset(OpAMD64MOVLstoreidx8) 18081 v.AuxInt = c 18082 v.Aux = sym 18083 v.AddArg(ptr) 18084 v.AddArg(idx) 18085 v.AddArg(val) 18086 v.AddArg(mem) 18087 return true 18088 } 18089 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 18090 // cond: is32Bit(c+d) 18091 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 18092 for { 18093 c := v.AuxInt 18094 sym := v.Aux 18095 _ = v.Args[3] 18096 v_0 := v.Args[0] 18097 if v_0.Op != OpAMD64ADDQconst { 18098 break 18099 } 18100 d := v_0.AuxInt 18101 ptr := v_0.Args[0] 18102 idx := v.Args[1] 18103 val := v.Args[2] 18104 mem := v.Args[3] 18105 if !(is32Bit(c + d)) { 18106 break 18107 } 18108 v.reset(OpAMD64MOVLstoreidx1) 18109 v.AuxInt = c + d 18110 v.Aux = sym 18111 v.AddArg(ptr) 18112 v.AddArg(idx) 18113 v.AddArg(val) 18114 v.AddArg(mem) 18115 return true 18116 } 18117 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 18118 // cond: is32Bit(c+d) 18119 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 18120 for { 18121 c := v.AuxInt 18122 sym := v.Aux 18123 _ = v.Args[3] 18124 ptr := v.Args[0] 18125 v_1 := v.Args[1] 18126 if v_1.Op != OpAMD64ADDQconst { 18127 break 18128 } 18129 d := v_1.AuxInt 18130 idx := v_1.Args[0] 18131 val := v.Args[2] 18132 mem := v.Args[3] 18133 if !(is32Bit(c + d)) { 18134 break 18135 } 18136 v.reset(OpAMD64MOVLstoreidx1) 18137 v.AuxInt = c + d 18138 v.Aux = sym 18139 v.AddArg(ptr) 18140 v.AddArg(idx) 18141 v.AddArg(val) 18142 v.AddArg(mem) 18143 return true 18144 } 18145 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 18146 // cond: x.Uses == 1 && clobber(x) 18147 // 
result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 18148 for { 18149 i := v.AuxInt 18150 s := v.Aux 18151 _ = v.Args[3] 18152 p := v.Args[0] 18153 idx := v.Args[1] 18154 v_2 := v.Args[2] 18155 if v_2.Op != OpAMD64SHRQconst { 18156 break 18157 } 18158 if v_2.AuxInt != 32 { 18159 break 18160 } 18161 w := v_2.Args[0] 18162 x := v.Args[3] 18163 if x.Op != OpAMD64MOVLstoreidx1 { 18164 break 18165 } 18166 if x.AuxInt != i-4 { 18167 break 18168 } 18169 if x.Aux != s { 18170 break 18171 } 18172 _ = x.Args[3] 18173 if p != x.Args[0] { 18174 break 18175 } 18176 if idx != x.Args[1] { 18177 break 18178 } 18179 if w != x.Args[2] { 18180 break 18181 } 18182 mem := x.Args[3] 18183 if !(x.Uses == 1 && clobber(x)) { 18184 break 18185 } 18186 v.reset(OpAMD64MOVQstoreidx1) 18187 v.AuxInt = i - 4 18188 v.Aux = s 18189 v.AddArg(p) 18190 v.AddArg(idx) 18191 v.AddArg(w) 18192 v.AddArg(mem) 18193 return true 18194 } 18195 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 18196 // cond: x.Uses == 1 && clobber(x) 18197 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 18198 for { 18199 i := v.AuxInt 18200 s := v.Aux 18201 _ = v.Args[3] 18202 p := v.Args[0] 18203 idx := v.Args[1] 18204 v_2 := v.Args[2] 18205 if v_2.Op != OpAMD64SHRQconst { 18206 break 18207 } 18208 j := v_2.AuxInt 18209 w := v_2.Args[0] 18210 x := v.Args[3] 18211 if x.Op != OpAMD64MOVLstoreidx1 { 18212 break 18213 } 18214 if x.AuxInt != i-4 { 18215 break 18216 } 18217 if x.Aux != s { 18218 break 18219 } 18220 _ = x.Args[3] 18221 if p != x.Args[0] { 18222 break 18223 } 18224 if idx != x.Args[1] { 18225 break 18226 } 18227 w0 := x.Args[2] 18228 if w0.Op != OpAMD64SHRQconst { 18229 break 18230 } 18231 if w0.AuxInt != j-32 { 18232 break 18233 } 18234 if w != w0.Args[0] { 18235 break 18236 } 18237 mem := x.Args[3] 18238 if !(x.Uses == 1 && clobber(x)) { 18239 break 18240 } 18241 v.reset(OpAMD64MOVQstoreidx1) 18242 v.AuxInt = i - 4 18243 v.Aux = s 18244 v.AddArg(p) 18245 v.AddArg(idx) 18246 v.AddArg(w0) 18247 v.AddArg(mem) 18248 return true 18249 } 18250 // match: (MOVLstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 18251 // cond: is32Bit(i+c) 18252 // result: (MOVLstore [i+c] {s} p w mem) 18253 for { 18254 i := v.AuxInt 18255 s := v.Aux 18256 _ = v.Args[3] 18257 p := v.Args[0] 18258 v_1 := v.Args[1] 18259 if v_1.Op != OpAMD64MOVQconst { 18260 break 18261 } 18262 c := v_1.AuxInt 18263 w := v.Args[2] 18264 mem := v.Args[3] 18265 if !(is32Bit(i + c)) { 18266 break 18267 } 18268 v.reset(OpAMD64MOVLstore) 18269 v.AuxInt = i + c 18270 v.Aux = s 18271 v.AddArg(p) 18272 v.AddArg(w) 18273 v.AddArg(mem) 18274 return true 18275 } 18276 return false 18277 } 18278 func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { 18279 b := v.Block 18280 _ = b 18281 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 18282 // cond: is32Bit(c+d) 18283 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 18284 for { 18285 c := v.AuxInt 18286 sym := v.Aux 18287 _ = v.Args[3] 18288 v_0 := v.Args[0] 18289 if v_0.Op != OpAMD64ADDQconst { 18290 break 18291 } 18292 d := v_0.AuxInt 18293 ptr := v_0.Args[0] 18294 idx := v.Args[1] 18295 val := v.Args[2] 18296 mem := v.Args[3] 18297 if !(is32Bit(c + d)) { 18298 break 18299 } 18300 v.reset(OpAMD64MOVLstoreidx4) 18301 v.AuxInt = c + d 18302 v.Aux = sym 18303 v.AddArg(ptr) 18304 v.AddArg(idx) 18305 v.AddArg(val) 18306 v.AddArg(mem) 18307 return true 18308 } 18309 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 18310 // cond: is32Bit(c+4*d) 
18311 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 18312 for { 18313 c := v.AuxInt 18314 sym := v.Aux 18315 _ = v.Args[3] 18316 ptr := v.Args[0] 18317 v_1 := v.Args[1] 18318 if v_1.Op != OpAMD64ADDQconst { 18319 break 18320 } 18321 d := v_1.AuxInt 18322 idx := v_1.Args[0] 18323 val := v.Args[2] 18324 mem := v.Args[3] 18325 if !(is32Bit(c + 4*d)) { 18326 break 18327 } 18328 v.reset(OpAMD64MOVLstoreidx4) 18329 v.AuxInt = c + 4*d 18330 v.Aux = sym 18331 v.AddArg(ptr) 18332 v.AddArg(idx) 18333 v.AddArg(val) 18334 v.AddArg(mem) 18335 return true 18336 } 18337 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 18338 // cond: x.Uses == 1 && clobber(x) 18339 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 18340 for { 18341 i := v.AuxInt 18342 s := v.Aux 18343 _ = v.Args[3] 18344 p := v.Args[0] 18345 idx := v.Args[1] 18346 v_2 := v.Args[2] 18347 if v_2.Op != OpAMD64SHRQconst { 18348 break 18349 } 18350 if v_2.AuxInt != 32 { 18351 break 18352 } 18353 w := v_2.Args[0] 18354 x := v.Args[3] 18355 if x.Op != OpAMD64MOVLstoreidx4 { 18356 break 18357 } 18358 if x.AuxInt != i-4 { 18359 break 18360 } 18361 if x.Aux != s { 18362 break 18363 } 18364 _ = x.Args[3] 18365 if p != x.Args[0] { 18366 break 18367 } 18368 if idx != x.Args[1] { 18369 break 18370 } 18371 if w != x.Args[2] { 18372 break 18373 } 18374 mem := x.Args[3] 18375 if !(x.Uses == 1 && clobber(x)) { 18376 break 18377 } 18378 v.reset(OpAMD64MOVQstoreidx1) 18379 v.AuxInt = i - 4 18380 v.Aux = s 18381 v.AddArg(p) 18382 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 18383 v0.AuxInt = 2 18384 v0.AddArg(idx) 18385 v.AddArg(v0) 18386 v.AddArg(w) 18387 v.AddArg(mem) 18388 return true 18389 } 18390 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 18391 // cond: x.Uses == 1 && clobber(x) 18392 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 18393 for { 18394 i := v.AuxInt 18395 s := v.Aux 18396 _ = v.Args[3] 18397 p := v.Args[0] 18398 idx := v.Args[1] 18399 v_2 := v.Args[2] 18400 if v_2.Op != OpAMD64SHRQconst { 18401 break 18402 } 18403 j := v_2.AuxInt 18404 w := v_2.Args[0] 18405 x := v.Args[3] 18406 if x.Op != OpAMD64MOVLstoreidx4 { 18407 break 18408 } 18409 if x.AuxInt != i-4 { 18410 break 18411 } 18412 if x.Aux != s { 18413 break 18414 } 18415 _ = x.Args[3] 18416 if p != x.Args[0] { 18417 break 18418 } 18419 if idx != x.Args[1] { 18420 break 18421 } 18422 w0 := x.Args[2] 18423 if w0.Op != OpAMD64SHRQconst { 18424 break 18425 } 18426 if w0.AuxInt != j-32 { 18427 break 18428 } 18429 if w != w0.Args[0] { 18430 break 18431 } 18432 mem := x.Args[3] 18433 if !(x.Uses == 1 && clobber(x)) { 18434 break 18435 } 18436 v.reset(OpAMD64MOVQstoreidx1) 18437 v.AuxInt = i - 4 18438 v.Aux = s 18439 v.AddArg(p) 18440 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 18441 v0.AuxInt = 2 18442 v0.AddArg(idx) 18443 v.AddArg(v0) 18444 v.AddArg(w0) 18445 v.AddArg(mem) 18446 return true 18447 } 18448 // match: (MOVLstoreidx4 [i] {s} p (MOVQconst [c]) w mem) 18449 // cond: is32Bit(i+4*c) 18450 // result: (MOVLstore [i+4*c] {s} p w mem) 18451 for { 18452 i := v.AuxInt 18453 s := v.Aux 18454 _ = v.Args[3] 18455 p := v.Args[0] 18456 v_1 := v.Args[1] 18457 if v_1.Op != OpAMD64MOVQconst { 18458 break 18459 } 18460 c := v_1.AuxInt 18461 w := v.Args[2] 18462 mem := v.Args[3] 18463 if !(is32Bit(i + 4*c)) { 18464 break 18465 } 18466 v.reset(OpAMD64MOVLstore) 18467 v.AuxInt = i + 4*c 18468 
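// When the index of a 4-scaled store is a known constant, the indexed form
// folds to a plain MOVLstore at displacement i+4*c; for example, i=8 and
// c=3 yield (MOVLstore [20] {s} p w mem). The is32Bit guard keeps the
// folded displacement within the signed 32-bit range that AMD64 addressing
// modes can encode.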
v.Aux = s 18469 v.AddArg(p) 18470 v.AddArg(w) 18471 v.AddArg(mem) 18472 return true 18473 } 18474 return false 18475 } 18476 func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { 18477 // match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 18478 // cond: is32Bit(c+d) 18479 // result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem) 18480 for { 18481 c := v.AuxInt 18482 sym := v.Aux 18483 _ = v.Args[3] 18484 v_0 := v.Args[0] 18485 if v_0.Op != OpAMD64ADDQconst { 18486 break 18487 } 18488 d := v_0.AuxInt 18489 ptr := v_0.Args[0] 18490 idx := v.Args[1] 18491 val := v.Args[2] 18492 mem := v.Args[3] 18493 if !(is32Bit(c + d)) { 18494 break 18495 } 18496 v.reset(OpAMD64MOVLstoreidx8) 18497 v.AuxInt = c + d 18498 v.Aux = sym 18499 v.AddArg(ptr) 18500 v.AddArg(idx) 18501 v.AddArg(val) 18502 v.AddArg(mem) 18503 return true 18504 } 18505 // match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 18506 // cond: is32Bit(c+8*d) 18507 // result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem) 18508 for { 18509 c := v.AuxInt 18510 sym := v.Aux 18511 _ = v.Args[3] 18512 ptr := v.Args[0] 18513 v_1 := v.Args[1] 18514 if v_1.Op != OpAMD64ADDQconst { 18515 break 18516 } 18517 d := v_1.AuxInt 18518 idx := v_1.Args[0] 18519 val := v.Args[2] 18520 mem := v.Args[3] 18521 if !(is32Bit(c + 8*d)) { 18522 break 18523 } 18524 v.reset(OpAMD64MOVLstoreidx8) 18525 v.AuxInt = c + 8*d 18526 v.Aux = sym 18527 v.AddArg(ptr) 18528 v.AddArg(idx) 18529 v.AddArg(val) 18530 v.AddArg(mem) 18531 return true 18532 } 18533 // match: (MOVLstoreidx8 [i] {s} p (MOVQconst [c]) w mem) 18534 // cond: is32Bit(i+8*c) 18535 // result: (MOVLstore [i+8*c] {s} p w mem) 18536 for { 18537 i := v.AuxInt 18538 s := v.Aux 18539 _ = v.Args[3] 18540 p := v.Args[0] 18541 v_1 := v.Args[1] 18542 if v_1.Op != OpAMD64MOVQconst { 18543 break 18544 } 18545 c := v_1.AuxInt 18546 w := v.Args[2] 18547 mem := v.Args[3] 18548 if !(is32Bit(i + 8*c)) { 18549 break 18550 } 18551 v.reset(OpAMD64MOVLstore) 18552 v.AuxInt = i + 8*c 18553 v.Aux = s 18554 v.AddArg(p) 18555 v.AddArg(w) 18556 v.AddArg(mem) 18557 return true 18558 } 18559 return false 18560 } 18561 func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { 18562 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 18563 // cond: is32Bit(off1+off2) 18564 // result: (MOVOload [off1+off2] {sym} ptr mem) 18565 for { 18566 off1 := v.AuxInt 18567 sym := v.Aux 18568 _ = v.Args[1] 18569 v_0 := v.Args[0] 18570 if v_0.Op != OpAMD64ADDQconst { 18571 break 18572 } 18573 off2 := v_0.AuxInt 18574 ptr := v_0.Args[0] 18575 mem := v.Args[1] 18576 if !(is32Bit(off1 + off2)) { 18577 break 18578 } 18579 v.reset(OpAMD64MOVOload) 18580 v.AuxInt = off1 + off2 18581 v.Aux = sym 18582 v.AddArg(ptr) 18583 v.AddArg(mem) 18584 return true 18585 } 18586 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 18587 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18588 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 18589 for { 18590 off1 := v.AuxInt 18591 sym1 := v.Aux 18592 _ = v.Args[1] 18593 v_0 := v.Args[0] 18594 if v_0.Op != OpAMD64LEAQ { 18595 break 18596 } 18597 off2 := v_0.AuxInt 18598 sym2 := v_0.Aux 18599 base := v_0.Args[0] 18600 mem := v.Args[1] 18601 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18602 break 18603 } 18604 v.reset(OpAMD64MOVOload) 18605 v.AuxInt = off1 + off2 18606 v.Aux = mergeSym(sym1, sym2) 18607 v.AddArg(base) 18608 v.AddArg(mem) 18609 return true 18610 } 18611 return false 18612 } 18613 func rewriteValueAMD64_OpAMD64MOVOstore_0(v 
*Value) bool { 18614 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 18615 // cond: is32Bit(off1+off2) 18616 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 18617 for { 18618 off1 := v.AuxInt 18619 sym := v.Aux 18620 _ = v.Args[2] 18621 v_0 := v.Args[0] 18622 if v_0.Op != OpAMD64ADDQconst { 18623 break 18624 } 18625 off2 := v_0.AuxInt 18626 ptr := v_0.Args[0] 18627 val := v.Args[1] 18628 mem := v.Args[2] 18629 if !(is32Bit(off1 + off2)) { 18630 break 18631 } 18632 v.reset(OpAMD64MOVOstore) 18633 v.AuxInt = off1 + off2 18634 v.Aux = sym 18635 v.AddArg(ptr) 18636 v.AddArg(val) 18637 v.AddArg(mem) 18638 return true 18639 } 18640 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 18641 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18642 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 18643 for { 18644 off1 := v.AuxInt 18645 sym1 := v.Aux 18646 _ = v.Args[2] 18647 v_0 := v.Args[0] 18648 if v_0.Op != OpAMD64LEAQ { 18649 break 18650 } 18651 off2 := v_0.AuxInt 18652 sym2 := v_0.Aux 18653 base := v_0.Args[0] 18654 val := v.Args[1] 18655 mem := v.Args[2] 18656 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18657 break 18658 } 18659 v.reset(OpAMD64MOVOstore) 18660 v.AuxInt = off1 + off2 18661 v.Aux = mergeSym(sym1, sym2) 18662 v.AddArg(base) 18663 v.AddArg(val) 18664 v.AddArg(mem) 18665 return true 18666 } 18667 return false 18668 } 18669 func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { 18670 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 18671 // cond: is32Bit(off1+off2) 18672 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 18673 for { 18674 off1 := v.AuxInt 18675 sym := v.Aux 18676 _ = v.Args[1] 18677 v_0 := v.Args[0] 18678 if v_0.Op != OpAMD64ADDQconst { 18679 break 18680 } 18681 off2 := v_0.AuxInt 18682 ptr := v_0.Args[0] 18683 mem := v.Args[1] 18684 if !(is32Bit(off1 + off2)) { 18685 break 18686 } 18687 v.reset(OpAMD64MOVQatomicload) 18688 v.AuxInt = off1 + off2 18689 v.Aux = sym 18690 v.AddArg(ptr) 18691 v.AddArg(mem) 18692 return true 18693 } 18694 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 18695 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18696 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 18697 for { 18698 off1 := v.AuxInt 18699 sym1 := v.Aux 18700 _ = v.Args[1] 18701 v_0 := v.Args[0] 18702 if v_0.Op != OpAMD64LEAQ { 18703 break 18704 } 18705 off2 := v_0.AuxInt 18706 sym2 := v_0.Aux 18707 ptr := v_0.Args[0] 18708 mem := v.Args[1] 18709 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18710 break 18711 } 18712 v.reset(OpAMD64MOVQatomicload) 18713 v.AuxInt = off1 + off2 18714 v.Aux = mergeSym(sym1, sym2) 18715 v.AddArg(ptr) 18716 v.AddArg(mem) 18717 return true 18718 } 18719 return false 18720 } 18721 func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool { 18722 b := v.Block 18723 _ = b 18724 // match: (MOVQf2i <t> (Arg <u> [off] {sym})) 18725 // cond: t.Size() == u.Size() 18726 // result: @b.Func.Entry (Arg <t> [off] {sym}) 18727 for { 18728 t := v.Type 18729 v_0 := v.Args[0] 18730 if v_0.Op != OpArg { 18731 break 18732 } 18733 u := v_0.Type 18734 off := v_0.AuxInt 18735 sym := v_0.Aux 18736 if !(t.Size() == u.Size()) { 18737 break 18738 } 18739 b = b.Func.Entry 18740 v0 := b.NewValue0(v.Pos, OpArg, t) 18741 v.reset(OpCopy) 18742 v.AddArg(v0) 18743 v0.AuxInt = off 18744 v0.Aux = sym 18745 return true 18746 } 18747 return false 18748 } 18749 func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { 18750 b := 
v.Block 18751 _ = b 18752 // match: (MOVQi2f <t> (Arg <u> [off] {sym})) 18753 // cond: t.Size() == u.Size() 18754 // result: @b.Func.Entry (Arg <t> [off] {sym}) 18755 for { 18756 t := v.Type 18757 v_0 := v.Args[0] 18758 if v_0.Op != OpArg { 18759 break 18760 } 18761 u := v_0.Type 18762 off := v_0.AuxInt 18763 sym := v_0.Aux 18764 if !(t.Size() == u.Size()) { 18765 break 18766 } 18767 b = b.Func.Entry 18768 v0 := b.NewValue0(v.Pos, OpArg, t) 18769 v.reset(OpCopy) 18770 v.AddArg(v0) 18771 v0.AuxInt = off 18772 v0.Aux = sym 18773 return true 18774 } 18775 return false 18776 } 18777 func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { 18778 b := v.Block 18779 _ = b 18780 config := b.Func.Config 18781 _ = config 18782 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 18783 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 18784 // result: x 18785 for { 18786 off := v.AuxInt 18787 sym := v.Aux 18788 _ = v.Args[1] 18789 ptr := v.Args[0] 18790 v_1 := v.Args[1] 18791 if v_1.Op != OpAMD64MOVQstore { 18792 break 18793 } 18794 off2 := v_1.AuxInt 18795 sym2 := v_1.Aux 18796 _ = v_1.Args[2] 18797 ptr2 := v_1.Args[0] 18798 x := v_1.Args[1] 18799 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 18800 break 18801 } 18802 v.reset(OpCopy) 18803 v.Type = x.Type 18804 v.AddArg(x) 18805 return true 18806 } 18807 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 18808 // cond: is32Bit(off1+off2) 18809 // result: (MOVQload [off1+off2] {sym} ptr mem) 18810 for { 18811 off1 := v.AuxInt 18812 sym := v.Aux 18813 _ = v.Args[1] 18814 v_0 := v.Args[0] 18815 if v_0.Op != OpAMD64ADDQconst { 18816 break 18817 } 18818 off2 := v_0.AuxInt 18819 ptr := v_0.Args[0] 18820 mem := v.Args[1] 18821 if !(is32Bit(off1 + off2)) { 18822 break 18823 } 18824 v.reset(OpAMD64MOVQload) 18825 v.AuxInt = off1 + off2 18826 v.Aux = sym 18827 v.AddArg(ptr) 18828 v.AddArg(mem) 18829 return true 18830 } 18831 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 18832 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18833 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 18834 for { 18835 off1 := v.AuxInt 18836 sym1 := v.Aux 18837 _ = v.Args[1] 18838 v_0 := v.Args[0] 18839 if v_0.Op != OpAMD64LEAQ { 18840 break 18841 } 18842 off2 := v_0.AuxInt 18843 sym2 := v_0.Aux 18844 base := v_0.Args[0] 18845 mem := v.Args[1] 18846 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18847 break 18848 } 18849 v.reset(OpAMD64MOVQload) 18850 v.AuxInt = off1 + off2 18851 v.Aux = mergeSym(sym1, sym2) 18852 v.AddArg(base) 18853 v.AddArg(mem) 18854 return true 18855 } 18856 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 18857 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18858 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 18859 for { 18860 off1 := v.AuxInt 18861 sym1 := v.Aux 18862 _ = v.Args[1] 18863 v_0 := v.Args[0] 18864 if v_0.Op != OpAMD64LEAQ1 { 18865 break 18866 } 18867 off2 := v_0.AuxInt 18868 sym2 := v_0.Aux 18869 _ = v_0.Args[1] 18870 ptr := v_0.Args[0] 18871 idx := v_0.Args[1] 18872 mem := v.Args[1] 18873 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18874 break 18875 } 18876 v.reset(OpAMD64MOVQloadidx1) 18877 v.AuxInt = off1 + off2 18878 v.Aux = mergeSym(sym1, sym2) 18879 v.AddArg(ptr) 18880 v.AddArg(idx) 18881 v.AddArg(mem) 18882 return true 18883 } 18884 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 18885 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18886 // result: 
(MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 18887 for { 18888 off1 := v.AuxInt 18889 sym1 := v.Aux 18890 _ = v.Args[1] 18891 v_0 := v.Args[0] 18892 if v_0.Op != OpAMD64LEAQ8 { 18893 break 18894 } 18895 off2 := v_0.AuxInt 18896 sym2 := v_0.Aux 18897 _ = v_0.Args[1] 18898 ptr := v_0.Args[0] 18899 idx := v_0.Args[1] 18900 mem := v.Args[1] 18901 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18902 break 18903 } 18904 v.reset(OpAMD64MOVQloadidx8) 18905 v.AuxInt = off1 + off2 18906 v.Aux = mergeSym(sym1, sym2) 18907 v.AddArg(ptr) 18908 v.AddArg(idx) 18909 v.AddArg(mem) 18910 return true 18911 } 18912 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 18913 // cond: ptr.Op != OpSB 18914 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 18915 for { 18916 off := v.AuxInt 18917 sym := v.Aux 18918 _ = v.Args[1] 18919 v_0 := v.Args[0] 18920 if v_0.Op != OpAMD64ADDQ { 18921 break 18922 } 18923 _ = v_0.Args[1] 18924 ptr := v_0.Args[0] 18925 idx := v_0.Args[1] 18926 mem := v.Args[1] 18927 if !(ptr.Op != OpSB) { 18928 break 18929 } 18930 v.reset(OpAMD64MOVQloadidx1) 18931 v.AuxInt = off 18932 v.Aux = sym 18933 v.AddArg(ptr) 18934 v.AddArg(idx) 18935 v.AddArg(mem) 18936 return true 18937 } 18938 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 18939 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 18940 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 18941 for { 18942 off1 := v.AuxInt 18943 sym1 := v.Aux 18944 _ = v.Args[1] 18945 v_0 := v.Args[0] 18946 if v_0.Op != OpAMD64LEAL { 18947 break 18948 } 18949 off2 := v_0.AuxInt 18950 sym2 := v_0.Aux 18951 base := v_0.Args[0] 18952 mem := v.Args[1] 18953 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 18954 break 18955 } 18956 v.reset(OpAMD64MOVQload) 18957 v.AuxInt = off1 + off2 18958 v.Aux = mergeSym(sym1, sym2) 18959 v.AddArg(base) 18960 v.AddArg(mem) 18961 return true 18962 } 18963 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 18964 // cond: is32Bit(off1+off2) 18965 // result: (MOVQload [off1+off2] {sym} ptr mem) 18966 for { 18967 off1 := v.AuxInt 18968 sym := v.Aux 18969 _ = v.Args[1] 18970 v_0 := v.Args[0] 18971 if v_0.Op != OpAMD64ADDLconst { 18972 break 18973 } 18974 off2 := v_0.AuxInt 18975 ptr := v_0.Args[0] 18976 mem := v.Args[1] 18977 if !(is32Bit(off1 + off2)) { 18978 break 18979 } 18980 v.reset(OpAMD64MOVQload) 18981 v.AuxInt = off1 + off2 18982 v.Aux = sym 18983 v.AddArg(ptr) 18984 v.AddArg(mem) 18985 return true 18986 } 18987 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) 18988 // cond: 18989 // result: (MOVQf2i val) 18990 for { 18991 off := v.AuxInt 18992 sym := v.Aux 18993 _ = v.Args[1] 18994 ptr := v.Args[0] 18995 v_1 := v.Args[1] 18996 if v_1.Op != OpAMD64MOVSDstore { 18997 break 18998 } 18999 if v_1.AuxInt != off { 19000 break 19001 } 19002 if v_1.Aux != sym { 19003 break 19004 } 19005 _ = v_1.Args[2] 19006 if ptr != v_1.Args[0] { 19007 break 19008 } 19009 val := v_1.Args[1] 19010 v.reset(OpAMD64MOVQf2i) 19011 v.AddArg(val) 19012 return true 19013 } 19014 // match: (MOVQload [off] {sym} (SB) _) 19015 // cond: symIsRO(sym) 19016 // result: (MOVQconst [int64(read64(sym, off, config.BigEndian))]) 19017 for { 19018 off := v.AuxInt 19019 sym := v.Aux 19020 _ = v.Args[1] 19021 v_0 := v.Args[0] 19022 if v_0.Op != OpSB { 19023 break 19024 } 19025 if !(symIsRO(sym)) { 19026 break 19027 } 19028 v.reset(OpAMD64MOVQconst) 19029 v.AuxInt = int64(read64(sym, off, config.BigEndian)) 19030 return true 19031 } 19032 return false 19033 } 19034 func 
rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { 19035 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 19036 // cond: 19037 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 19038 for { 19039 c := v.AuxInt 19040 sym := v.Aux 19041 _ = v.Args[2] 19042 ptr := v.Args[0] 19043 v_1 := v.Args[1] 19044 if v_1.Op != OpAMD64SHLQconst { 19045 break 19046 } 19047 if v_1.AuxInt != 3 { 19048 break 19049 } 19050 idx := v_1.Args[0] 19051 mem := v.Args[2] 19052 v.reset(OpAMD64MOVQloadidx8) 19053 v.AuxInt = c 19054 v.Aux = sym 19055 v.AddArg(ptr) 19056 v.AddArg(idx) 19057 v.AddArg(mem) 19058 return true 19059 } 19060 // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 19061 // cond: 19062 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 19063 for { 19064 c := v.AuxInt 19065 sym := v.Aux 19066 _ = v.Args[2] 19067 v_0 := v.Args[0] 19068 if v_0.Op != OpAMD64SHLQconst { 19069 break 19070 } 19071 if v_0.AuxInt != 3 { 19072 break 19073 } 19074 idx := v_0.Args[0] 19075 ptr := v.Args[1] 19076 mem := v.Args[2] 19077 v.reset(OpAMD64MOVQloadidx8) 19078 v.AuxInt = c 19079 v.Aux = sym 19080 v.AddArg(ptr) 19081 v.AddArg(idx) 19082 v.AddArg(mem) 19083 return true 19084 } 19085 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 19086 // cond: is32Bit(c+d) 19087 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 19088 for { 19089 c := v.AuxInt 19090 sym := v.Aux 19091 _ = v.Args[2] 19092 v_0 := v.Args[0] 19093 if v_0.Op != OpAMD64ADDQconst { 19094 break 19095 } 19096 d := v_0.AuxInt 19097 ptr := v_0.Args[0] 19098 idx := v.Args[1] 19099 mem := v.Args[2] 19100 if !(is32Bit(c + d)) { 19101 break 19102 } 19103 v.reset(OpAMD64MOVQloadidx1) 19104 v.AuxInt = c + d 19105 v.Aux = sym 19106 v.AddArg(ptr) 19107 v.AddArg(idx) 19108 v.AddArg(mem) 19109 return true 19110 } 19111 // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 19112 // cond: is32Bit(c+d) 19113 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 19114 for { 19115 c := v.AuxInt 19116 sym := v.Aux 19117 _ = v.Args[2] 19118 idx := v.Args[0] 19119 v_1 := v.Args[1] 19120 if v_1.Op != OpAMD64ADDQconst { 19121 break 19122 } 19123 d := v_1.AuxInt 19124 ptr := v_1.Args[0] 19125 mem := v.Args[2] 19126 if !(is32Bit(c + d)) { 19127 break 19128 } 19129 v.reset(OpAMD64MOVQloadidx1) 19130 v.AuxInt = c + d 19131 v.Aux = sym 19132 v.AddArg(ptr) 19133 v.AddArg(idx) 19134 v.AddArg(mem) 19135 return true 19136 } 19137 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 19138 // cond: is32Bit(c+d) 19139 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 19140 for { 19141 c := v.AuxInt 19142 sym := v.Aux 19143 _ = v.Args[2] 19144 ptr := v.Args[0] 19145 v_1 := v.Args[1] 19146 if v_1.Op != OpAMD64ADDQconst { 19147 break 19148 } 19149 d := v_1.AuxInt 19150 idx := v_1.Args[0] 19151 mem := v.Args[2] 19152 if !(is32Bit(c + d)) { 19153 break 19154 } 19155 v.reset(OpAMD64MOVQloadidx1) 19156 v.AuxInt = c + d 19157 v.Aux = sym 19158 v.AddArg(ptr) 19159 v.AddArg(idx) 19160 v.AddArg(mem) 19161 return true 19162 } 19163 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 19164 // cond: is32Bit(c+d) 19165 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 19166 for { 19167 c := v.AuxInt 19168 sym := v.Aux 19169 _ = v.Args[2] 19170 v_0 := v.Args[0] 19171 if v_0.Op != OpAMD64ADDQconst { 19172 break 19173 } 19174 d := v_0.AuxInt 19175 idx := v_0.Args[0] 19176 ptr := v.Args[1] 19177 mem := v.Args[2] 19178 if !(is32Bit(c + d)) { 19179 break 19180 } 19181 v.reset(OpAMD64MOVQloadidx1) 19182 v.AuxInt = c + d 19183 v.Aux = sym 19184 
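// MOVQloadidx1 computes ptr+idx, which is symmetric in its two address
// operands, so the constant-folding rules above appear in mirrored pairs
// that accept the ADDQconst in either operand position; each variant folds
// the addend d into the displacement, guarded by is32Bit(c+d).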
v.AddArg(ptr) 19185 v.AddArg(idx) 19186 v.AddArg(mem) 19187 return true 19188 } 19189 // match: (MOVQloadidx1 [i] {s} p (MOVQconst [c]) mem) 19190 // cond: is32Bit(i+c) 19191 // result: (MOVQload [i+c] {s} p mem) 19192 for { 19193 i := v.AuxInt 19194 s := v.Aux 19195 _ = v.Args[2] 19196 p := v.Args[0] 19197 v_1 := v.Args[1] 19198 if v_1.Op != OpAMD64MOVQconst { 19199 break 19200 } 19201 c := v_1.AuxInt 19202 mem := v.Args[2] 19203 if !(is32Bit(i + c)) { 19204 break 19205 } 19206 v.reset(OpAMD64MOVQload) 19207 v.AuxInt = i + c 19208 v.Aux = s 19209 v.AddArg(p) 19210 v.AddArg(mem) 19211 return true 19212 } 19213 // match: (MOVQloadidx1 [i] {s} (MOVQconst [c]) p mem) 19214 // cond: is32Bit(i+c) 19215 // result: (MOVQload [i+c] {s} p mem) 19216 for { 19217 i := v.AuxInt 19218 s := v.Aux 19219 _ = v.Args[2] 19220 v_0 := v.Args[0] 19221 if v_0.Op != OpAMD64MOVQconst { 19222 break 19223 } 19224 c := v_0.AuxInt 19225 p := v.Args[1] 19226 mem := v.Args[2] 19227 if !(is32Bit(i + c)) { 19228 break 19229 } 19230 v.reset(OpAMD64MOVQload) 19231 v.AuxInt = i + c 19232 v.Aux = s 19233 v.AddArg(p) 19234 v.AddArg(mem) 19235 return true 19236 } 19237 return false 19238 } 19239 func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { 19240 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 19241 // cond: is32Bit(c+d) 19242 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 19243 for { 19244 c := v.AuxInt 19245 sym := v.Aux 19246 _ = v.Args[2] 19247 v_0 := v.Args[0] 19248 if v_0.Op != OpAMD64ADDQconst { 19249 break 19250 } 19251 d := v_0.AuxInt 19252 ptr := v_0.Args[0] 19253 idx := v.Args[1] 19254 mem := v.Args[2] 19255 if !(is32Bit(c + d)) { 19256 break 19257 } 19258 v.reset(OpAMD64MOVQloadidx8) 19259 v.AuxInt = c + d 19260 v.Aux = sym 19261 v.AddArg(ptr) 19262 v.AddArg(idx) 19263 v.AddArg(mem) 19264 return true 19265 } 19266 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 19267 // cond: is32Bit(c+8*d) 19268 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 19269 for { 19270 c := v.AuxInt 19271 sym := v.Aux 19272 _ = v.Args[2] 19273 ptr := v.Args[0] 19274 v_1 := v.Args[1] 19275 if v_1.Op != OpAMD64ADDQconst { 19276 break 19277 } 19278 d := v_1.AuxInt 19279 idx := v_1.Args[0] 19280 mem := v.Args[2] 19281 if !(is32Bit(c + 8*d)) { 19282 break 19283 } 19284 v.reset(OpAMD64MOVQloadidx8) 19285 v.AuxInt = c + 8*d 19286 v.Aux = sym 19287 v.AddArg(ptr) 19288 v.AddArg(idx) 19289 v.AddArg(mem) 19290 return true 19291 } 19292 // match: (MOVQloadidx8 [i] {s} p (MOVQconst [c]) mem) 19293 // cond: is32Bit(i+8*c) 19294 // result: (MOVQload [i+8*c] {s} p mem) 19295 for { 19296 i := v.AuxInt 19297 s := v.Aux 19298 _ = v.Args[2] 19299 p := v.Args[0] 19300 v_1 := v.Args[1] 19301 if v_1.Op != OpAMD64MOVQconst { 19302 break 19303 } 19304 c := v_1.AuxInt 19305 mem := v.Args[2] 19306 if !(is32Bit(i + 8*c)) { 19307 break 19308 } 19309 v.reset(OpAMD64MOVQload) 19310 v.AuxInt = i + 8*c 19311 v.Aux = s 19312 v.AddArg(p) 19313 v.AddArg(mem) 19314 return true 19315 } 19316 return false 19317 } 19318 func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { 19319 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 19320 // cond: is32Bit(off1+off2) 19321 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 19322 for { 19323 off1 := v.AuxInt 19324 sym := v.Aux 19325 _ = v.Args[2] 19326 v_0 := v.Args[0] 19327 if v_0.Op != OpAMD64ADDQconst { 19328 break 19329 } 19330 off2 := v_0.AuxInt 19331 ptr := v_0.Args[0] 19332 val := v.Args[1] 19333 mem := v.Args[2] 19334 if !(is32Bit(off1 + off2)) { 19335 
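// This is the same displacement-folding pattern used for the loads above:
// a store through (ADDQconst [off2] ptr) becomes a store at [off1+off2]
// directly off ptr; for example, off1=100 and off2=-4 fold to [96]. As
// elsewhere, is32Bit(off1+off2) is required because the sum must fit in a
// signed 32-bit displacement.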
break 19336 } 19337 v.reset(OpAMD64MOVQstore) 19338 v.AuxInt = off1 + off2 19339 v.Aux = sym 19340 v.AddArg(ptr) 19341 v.AddArg(val) 19342 v.AddArg(mem) 19343 return true 19344 } 19345 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 19346 // cond: validValAndOff(c,off) 19347 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 19348 for { 19349 off := v.AuxInt 19350 sym := v.Aux 19351 _ = v.Args[2] 19352 ptr := v.Args[0] 19353 v_1 := v.Args[1] 19354 if v_1.Op != OpAMD64MOVQconst { 19355 break 19356 } 19357 c := v_1.AuxInt 19358 mem := v.Args[2] 19359 if !(validValAndOff(c, off)) { 19360 break 19361 } 19362 v.reset(OpAMD64MOVQstoreconst) 19363 v.AuxInt = makeValAndOff(c, off) 19364 v.Aux = sym 19365 v.AddArg(ptr) 19366 v.AddArg(mem) 19367 return true 19368 } 19369 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 19370 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19371 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 19372 for { 19373 off1 := v.AuxInt 19374 sym1 := v.Aux 19375 _ = v.Args[2] 19376 v_0 := v.Args[0] 19377 if v_0.Op != OpAMD64LEAQ { 19378 break 19379 } 19380 off2 := v_0.AuxInt 19381 sym2 := v_0.Aux 19382 base := v_0.Args[0] 19383 val := v.Args[1] 19384 mem := v.Args[2] 19385 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19386 break 19387 } 19388 v.reset(OpAMD64MOVQstore) 19389 v.AuxInt = off1 + off2 19390 v.Aux = mergeSym(sym1, sym2) 19391 v.AddArg(base) 19392 v.AddArg(val) 19393 v.AddArg(mem) 19394 return true 19395 } 19396 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 19397 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19398 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 19399 for { 19400 off1 := v.AuxInt 19401 sym1 := v.Aux 19402 _ = v.Args[2] 19403 v_0 := v.Args[0] 19404 if v_0.Op != OpAMD64LEAQ1 { 19405 break 19406 } 19407 off2 := v_0.AuxInt 19408 sym2 := v_0.Aux 19409 _ = v_0.Args[1] 19410 ptr := v_0.Args[0] 19411 idx := v_0.Args[1] 19412 val := v.Args[1] 19413 mem := v.Args[2] 19414 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19415 break 19416 } 19417 v.reset(OpAMD64MOVQstoreidx1) 19418 v.AuxInt = off1 + off2 19419 v.Aux = mergeSym(sym1, sym2) 19420 v.AddArg(ptr) 19421 v.AddArg(idx) 19422 v.AddArg(val) 19423 v.AddArg(mem) 19424 return true 19425 } 19426 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 19427 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19428 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 19429 for { 19430 off1 := v.AuxInt 19431 sym1 := v.Aux 19432 _ = v.Args[2] 19433 v_0 := v.Args[0] 19434 if v_0.Op != OpAMD64LEAQ8 { 19435 break 19436 } 19437 off2 := v_0.AuxInt 19438 sym2 := v_0.Aux 19439 _ = v_0.Args[1] 19440 ptr := v_0.Args[0] 19441 idx := v_0.Args[1] 19442 val := v.Args[1] 19443 mem := v.Args[2] 19444 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19445 break 19446 } 19447 v.reset(OpAMD64MOVQstoreidx8) 19448 v.AuxInt = off1 + off2 19449 v.Aux = mergeSym(sym1, sym2) 19450 v.AddArg(ptr) 19451 v.AddArg(idx) 19452 v.AddArg(val) 19453 v.AddArg(mem) 19454 return true 19455 } 19456 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 19457 // cond: ptr.Op != OpSB 19458 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 19459 for { 19460 off := v.AuxInt 19461 sym := v.Aux 19462 _ = v.Args[2] 19463 v_0 := v.Args[0] 19464 if v_0.Op != OpAMD64ADDQ { 19465 break 19466 } 19467 _ = v_0.Args[1] 19468 ptr := v_0.Args[0] 19469 idx := 
v_0.Args[1] 19470 val := v.Args[1] 19471 mem := v.Args[2] 19472 if !(ptr.Op != OpSB) { 19473 break 19474 } 19475 v.reset(OpAMD64MOVQstoreidx1) 19476 v.AuxInt = off 19477 v.Aux = sym 19478 v.AddArg(ptr) 19479 v.AddArg(idx) 19480 v.AddArg(val) 19481 v.AddArg(mem) 19482 return true 19483 } 19484 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 19485 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 19486 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 19487 for { 19488 off1 := v.AuxInt 19489 sym1 := v.Aux 19490 _ = v.Args[2] 19491 v_0 := v.Args[0] 19492 if v_0.Op != OpAMD64LEAL { 19493 break 19494 } 19495 off2 := v_0.AuxInt 19496 sym2 := v_0.Aux 19497 base := v_0.Args[0] 19498 val := v.Args[1] 19499 mem := v.Args[2] 19500 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 19501 break 19502 } 19503 v.reset(OpAMD64MOVQstore) 19504 v.AuxInt = off1 + off2 19505 v.Aux = mergeSym(sym1, sym2) 19506 v.AddArg(base) 19507 v.AddArg(val) 19508 v.AddArg(mem) 19509 return true 19510 } 19511 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 19512 // cond: is32Bit(off1+off2) 19513 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 19514 for { 19515 off1 := v.AuxInt 19516 sym := v.Aux 19517 _ = v.Args[2] 19518 v_0 := v.Args[0] 19519 if v_0.Op != OpAMD64ADDLconst { 19520 break 19521 } 19522 off2 := v_0.AuxInt 19523 ptr := v_0.Args[0] 19524 val := v.Args[1] 19525 mem := v.Args[2] 19526 if !(is32Bit(off1 + off2)) { 19527 break 19528 } 19529 v.reset(OpAMD64MOVQstore) 19530 v.AuxInt = off1 + off2 19531 v.Aux = sym 19532 v.AddArg(ptr) 19533 v.AddArg(val) 19534 v.AddArg(mem) 19535 return true 19536 } 19537 // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem) 19538 // cond: y.Uses==1 && clobber(y) 19539 // result: (ADDQmodify [off] {sym} ptr x mem) 19540 for { 19541 off := v.AuxInt 19542 sym := v.Aux 19543 _ = v.Args[2] 19544 ptr := v.Args[0] 19545 y := v.Args[1] 19546 if y.Op != OpAMD64ADDQload { 19547 break 19548 } 19549 if y.AuxInt != off { 19550 break 19551 } 19552 if y.Aux != sym { 19553 break 19554 } 19555 _ = y.Args[2] 19556 x := y.Args[0] 19557 if ptr != y.Args[1] { 19558 break 19559 } 19560 mem := y.Args[2] 19561 if mem != v.Args[2] { 19562 break 19563 } 19564 if !(y.Uses == 1 && clobber(y)) { 19565 break 19566 } 19567 v.reset(OpAMD64ADDQmodify) 19568 v.AuxInt = off 19569 v.Aux = sym 19570 v.AddArg(ptr) 19571 v.AddArg(x) 19572 v.AddArg(mem) 19573 return true 19574 } 19575 // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) 19576 // cond: y.Uses==1 && clobber(y) 19577 // result: (ANDQmodify [off] {sym} ptr x mem) 19578 for { 19579 off := v.AuxInt 19580 sym := v.Aux 19581 _ = v.Args[2] 19582 ptr := v.Args[0] 19583 y := v.Args[1] 19584 if y.Op != OpAMD64ANDQload { 19585 break 19586 } 19587 if y.AuxInt != off { 19588 break 19589 } 19590 if y.Aux != sym { 19591 break 19592 } 19593 _ = y.Args[2] 19594 x := y.Args[0] 19595 if ptr != y.Args[1] { 19596 break 19597 } 19598 mem := y.Args[2] 19599 if mem != v.Args[2] { 19600 break 19601 } 19602 if !(y.Uses == 1 && clobber(y)) { 19603 break 19604 } 19605 v.reset(OpAMD64ANDQmodify) 19606 v.AuxInt = off 19607 v.Aux = sym 19608 v.AddArg(ptr) 19609 v.AddArg(x) 19610 v.AddArg(mem) 19611 return true 19612 } 19613 return false 19614 } 19615 func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { 19616 // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) 19617 // cond: y.Uses==1 && clobber(y) 19618 // result: (ORQmodify [off] {sym} ptr 
x mem) 19619 for { 19620 off := v.AuxInt 19621 sym := v.Aux 19622 _ = v.Args[2] 19623 ptr := v.Args[0] 19624 y := v.Args[1] 19625 if y.Op != OpAMD64ORQload { 19626 break 19627 } 19628 if y.AuxInt != off { 19629 break 19630 } 19631 if y.Aux != sym { 19632 break 19633 } 19634 _ = y.Args[2] 19635 x := y.Args[0] 19636 if ptr != y.Args[1] { 19637 break 19638 } 19639 mem := y.Args[2] 19640 if mem != v.Args[2] { 19641 break 19642 } 19643 if !(y.Uses == 1 && clobber(y)) { 19644 break 19645 } 19646 v.reset(OpAMD64ORQmodify) 19647 v.AuxInt = off 19648 v.Aux = sym 19649 v.AddArg(ptr) 19650 v.AddArg(x) 19651 v.AddArg(mem) 19652 return true 19653 } 19654 // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem) 19655 // cond: y.Uses==1 && clobber(y) 19656 // result: (XORQmodify [off] {sym} ptr x mem) 19657 for { 19658 off := v.AuxInt 19659 sym := v.Aux 19660 _ = v.Args[2] 19661 ptr := v.Args[0] 19662 y := v.Args[1] 19663 if y.Op != OpAMD64XORQload { 19664 break 19665 } 19666 if y.AuxInt != off { 19667 break 19668 } 19669 if y.Aux != sym { 19670 break 19671 } 19672 _ = y.Args[2] 19673 x := y.Args[0] 19674 if ptr != y.Args[1] { 19675 break 19676 } 19677 mem := y.Args[2] 19678 if mem != v.Args[2] { 19679 break 19680 } 19681 if !(y.Uses == 1 && clobber(y)) { 19682 break 19683 } 19684 v.reset(OpAMD64XORQmodify) 19685 v.AuxInt = off 19686 v.Aux = sym 19687 v.AddArg(ptr) 19688 v.AddArg(x) 19689 v.AddArg(mem) 19690 return true 19691 } 19692 // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) 19693 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 19694 // result: (ADDQmodify [off] {sym} ptr x mem) 19695 for { 19696 off := v.AuxInt 19697 sym := v.Aux 19698 _ = v.Args[2] 19699 ptr := v.Args[0] 19700 y := v.Args[1] 19701 if y.Op != OpAMD64ADDQ { 19702 break 19703 } 19704 _ = y.Args[1] 19705 l := y.Args[0] 19706 if l.Op != OpAMD64MOVQload { 19707 break 19708 } 19709 if l.AuxInt != off { 19710 break 19711 } 19712 if l.Aux != sym { 19713 break 19714 } 19715 _ = l.Args[1] 19716 if ptr != l.Args[0] { 19717 break 19718 } 19719 mem := l.Args[1] 19720 x := y.Args[1] 19721 if mem != v.Args[2] { 19722 break 19723 } 19724 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 19725 break 19726 } 19727 v.reset(OpAMD64ADDQmodify) 19728 v.AuxInt = off 19729 v.Aux = sym 19730 v.AddArg(ptr) 19731 v.AddArg(x) 19732 v.AddArg(mem) 19733 return true 19734 } 19735 // match: (MOVQstore {sym} [off] ptr y:(ADDQ x l:(MOVQload [off] {sym} ptr mem)) mem) 19736 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 19737 // result: (ADDQmodify [off] {sym} ptr x mem) 19738 for { 19739 off := v.AuxInt 19740 sym := v.Aux 19741 _ = v.Args[2] 19742 ptr := v.Args[0] 19743 y := v.Args[1] 19744 if y.Op != OpAMD64ADDQ { 19745 break 19746 } 19747 _ = y.Args[1] 19748 x := y.Args[0] 19749 l := y.Args[1] 19750 if l.Op != OpAMD64MOVQload { 19751 break 19752 } 19753 if l.AuxInt != off { 19754 break 19755 } 19756 if l.Aux != sym { 19757 break 19758 } 19759 _ = l.Args[1] 19760 if ptr != l.Args[0] { 19761 break 19762 } 19763 mem := l.Args[1] 19764 if mem != v.Args[2] { 19765 break 19766 } 19767 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 19768 break 19769 } 19770 v.reset(OpAMD64ADDQmodify) 19771 v.AuxInt = off 19772 v.Aux = sym 19773 v.AddArg(ptr) 19774 v.AddArg(x) 19775 v.AddArg(mem) 19776 return true 19777 } 19778 // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem) 19779 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 
19780 // result: (SUBQmodify [off] {sym} ptr x mem) 19781 for { 19782 off := v.AuxInt 19783 sym := v.Aux 19784 _ = v.Args[2] 19785 ptr := v.Args[0] 19786 y := v.Args[1] 19787 if y.Op != OpAMD64SUBQ { 19788 break 19789 } 19790 _ = y.Args[1] 19791 l := y.Args[0] 19792 if l.Op != OpAMD64MOVQload { 19793 break 19794 } 19795 if l.AuxInt != off { 19796 break 19797 } 19798 if l.Aux != sym { 19799 break 19800 } 19801 _ = l.Args[1] 19802 if ptr != l.Args[0] { 19803 break 19804 } 19805 mem := l.Args[1] 19806 x := y.Args[1] 19807 if mem != v.Args[2] { 19808 break 19809 } 19810 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 19811 break 19812 } 19813 v.reset(OpAMD64SUBQmodify) 19814 v.AuxInt = off 19815 v.Aux = sym 19816 v.AddArg(ptr) 19817 v.AddArg(x) 19818 v.AddArg(mem) 19819 return true 19820 } 19821 // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) 19822 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 19823 // result: (ANDQmodify [off] {sym} ptr x mem) 19824 for { 19825 off := v.AuxInt 19826 sym := v.Aux 19827 _ = v.Args[2] 19828 ptr := v.Args[0] 19829 y := v.Args[1] 19830 if y.Op != OpAMD64ANDQ { 19831 break 19832 } 19833 _ = y.Args[1] 19834 l := y.Args[0] 19835 if l.Op != OpAMD64MOVQload { 19836 break 19837 } 19838 if l.AuxInt != off { 19839 break 19840 } 19841 if l.Aux != sym { 19842 break 19843 } 19844 _ = l.Args[1] 19845 if ptr != l.Args[0] { 19846 break 19847 } 19848 mem := l.Args[1] 19849 x := y.Args[1] 19850 if mem != v.Args[2] { 19851 break 19852 } 19853 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 19854 break 19855 } 19856 v.reset(OpAMD64ANDQmodify) 19857 v.AuxInt = off 19858 v.Aux = sym 19859 v.AddArg(ptr) 19860 v.AddArg(x) 19861 v.AddArg(mem) 19862 return true 19863 } 19864 // match: (MOVQstore {sym} [off] ptr y:(ANDQ x l:(MOVQload [off] {sym} ptr mem)) mem) 19865 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 19866 // result: (ANDQmodify [off] {sym} ptr x mem) 19867 for { 19868 off := v.AuxInt 19869 sym := v.Aux 19870 _ = v.Args[2] 19871 ptr := v.Args[0] 19872 y := v.Args[1] 19873 if y.Op != OpAMD64ANDQ { 19874 break 19875 } 19876 _ = y.Args[1] 19877 x := y.Args[0] 19878 l := y.Args[1] 19879 if l.Op != OpAMD64MOVQload { 19880 break 19881 } 19882 if l.AuxInt != off { 19883 break 19884 } 19885 if l.Aux != sym { 19886 break 19887 } 19888 _ = l.Args[1] 19889 if ptr != l.Args[0] { 19890 break 19891 } 19892 mem := l.Args[1] 19893 if mem != v.Args[2] { 19894 break 19895 } 19896 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 19897 break 19898 } 19899 v.reset(OpAMD64ANDQmodify) 19900 v.AuxInt = off 19901 v.Aux = sym 19902 v.AddArg(ptr) 19903 v.AddArg(x) 19904 v.AddArg(mem) 19905 return true 19906 } 19907 // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem) 19908 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 19909 // result: (ORQmodify [off] {sym} ptr x mem) 19910 for { 19911 off := v.AuxInt 19912 sym := v.Aux 19913 _ = v.Args[2] 19914 ptr := v.Args[0] 19915 y := v.Args[1] 19916 if y.Op != OpAMD64ORQ { 19917 break 19918 } 19919 _ = y.Args[1] 19920 l := y.Args[0] 19921 if l.Op != OpAMD64MOVQload { 19922 break 19923 } 19924 if l.AuxInt != off { 19925 break 19926 } 19927 if l.Aux != sym { 19928 break 19929 } 19930 _ = l.Args[1] 19931 if ptr != l.Args[0] { 19932 break 19933 } 19934 mem := l.Args[1] 19935 x := y.Args[1] 19936 if mem != v.Args[2] { 19937 break 19938 } 19939 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 19940 break 
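// The rules in this group perform read-modify-write fusion: a store of
// (ORQ (MOVQload ptr) x) back to the same address collapses into ORQmodify,
// a single memory-destination OR. The y.Uses==1 and l.Uses==1 conditions
// ensure the intermediate op and load have no other consumers, and matching
// mem on both the load and the store rules out an intervening write.
// Commutative ops (ADDQ, ANDQ, ORQ, XORQ) get mirrored rules for the load
// in either operand position; SUBQ, being non-commutative, apparently
// matches only with the load as its first operand.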
19941 } 19942 v.reset(OpAMD64ORQmodify) 19943 v.AuxInt = off 19944 v.Aux = sym 19945 v.AddArg(ptr) 19946 v.AddArg(x) 19947 v.AddArg(mem) 19948 return true 19949 } 19950 // match: (MOVQstore {sym} [off] ptr y:(ORQ x l:(MOVQload [off] {sym} ptr mem)) mem) 19951 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 19952 // result: (ORQmodify [off] {sym} ptr x mem) 19953 for { 19954 off := v.AuxInt 19955 sym := v.Aux 19956 _ = v.Args[2] 19957 ptr := v.Args[0] 19958 y := v.Args[1] 19959 if y.Op != OpAMD64ORQ { 19960 break 19961 } 19962 _ = y.Args[1] 19963 x := y.Args[0] 19964 l := y.Args[1] 19965 if l.Op != OpAMD64MOVQload { 19966 break 19967 } 19968 if l.AuxInt != off { 19969 break 19970 } 19971 if l.Aux != sym { 19972 break 19973 } 19974 _ = l.Args[1] 19975 if ptr != l.Args[0] { 19976 break 19977 } 19978 mem := l.Args[1] 19979 if mem != v.Args[2] { 19980 break 19981 } 19982 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 19983 break 19984 } 19985 v.reset(OpAMD64ORQmodify) 19986 v.AuxInt = off 19987 v.Aux = sym 19988 v.AddArg(ptr) 19989 v.AddArg(x) 19990 v.AddArg(mem) 19991 return true 19992 } 19993 // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem) 19994 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 19995 // result: (XORQmodify [off] {sym} ptr x mem) 19996 for { 19997 off := v.AuxInt 19998 sym := v.Aux 19999 _ = v.Args[2] 20000 ptr := v.Args[0] 20001 y := v.Args[1] 20002 if y.Op != OpAMD64XORQ { 20003 break 20004 } 20005 _ = y.Args[1] 20006 l := y.Args[0] 20007 if l.Op != OpAMD64MOVQload { 20008 break 20009 } 20010 if l.AuxInt != off { 20011 break 20012 } 20013 if l.Aux != sym { 20014 break 20015 } 20016 _ = l.Args[1] 20017 if ptr != l.Args[0] { 20018 break 20019 } 20020 mem := l.Args[1] 20021 x := y.Args[1] 20022 if mem != v.Args[2] { 20023 break 20024 } 20025 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 20026 break 20027 } 20028 v.reset(OpAMD64XORQmodify) 20029 v.AuxInt = off 20030 v.Aux = sym 20031 v.AddArg(ptr) 20032 v.AddArg(x) 20033 v.AddArg(mem) 20034 return true 20035 } 20036 return false 20037 } 20038 func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { 20039 // match: (MOVQstore {sym} [off] ptr y:(XORQ x l:(MOVQload [off] {sym} ptr mem)) mem) 20040 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 20041 // result: (XORQmodify [off] {sym} ptr x mem) 20042 for { 20043 off := v.AuxInt 20044 sym := v.Aux 20045 _ = v.Args[2] 20046 ptr := v.Args[0] 20047 y := v.Args[1] 20048 if y.Op != OpAMD64XORQ { 20049 break 20050 } 20051 _ = y.Args[1] 20052 x := y.Args[0] 20053 l := y.Args[1] 20054 if l.Op != OpAMD64MOVQload { 20055 break 20056 } 20057 if l.AuxInt != off { 20058 break 20059 } 20060 if l.Aux != sym { 20061 break 20062 } 20063 _ = l.Args[1] 20064 if ptr != l.Args[0] { 20065 break 20066 } 20067 mem := l.Args[1] 20068 if mem != v.Args[2] { 20069 break 20070 } 20071 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 20072 break 20073 } 20074 v.reset(OpAMD64XORQmodify) 20075 v.AuxInt = off 20076 v.Aux = sym 20077 v.AddArg(ptr) 20078 v.AddArg(x) 20079 v.AddArg(mem) 20080 return true 20081 } 20082 // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem) 20083 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 20084 // result: (BTCQmodify [off] {sym} ptr x mem) 20085 for { 20086 off := v.AuxInt 20087 sym := v.Aux 20088 _ = v.Args[2] 20089 ptr := v.Args[0] 20090 y := v.Args[1] 20091 if y.Op != OpAMD64BTCQ { 20092 break 20093 } 20094 _ = 
y.Args[1] 20095 l := y.Args[0] 20096 if l.Op != OpAMD64MOVQload { 20097 break 20098 } 20099 if l.AuxInt != off { 20100 break 20101 } 20102 if l.Aux != sym { 20103 break 20104 } 20105 _ = l.Args[1] 20106 if ptr != l.Args[0] { 20107 break 20108 } 20109 mem := l.Args[1] 20110 x := y.Args[1] 20111 if mem != v.Args[2] { 20112 break 20113 } 20114 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 20115 break 20116 } 20117 v.reset(OpAMD64BTCQmodify) 20118 v.AuxInt = off 20119 v.Aux = sym 20120 v.AddArg(ptr) 20121 v.AddArg(x) 20122 v.AddArg(mem) 20123 return true 20124 } 20125 // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem) 20126 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 20127 // result: (BTRQmodify [off] {sym} ptr x mem) 20128 for { 20129 off := v.AuxInt 20130 sym := v.Aux 20131 _ = v.Args[2] 20132 ptr := v.Args[0] 20133 y := v.Args[1] 20134 if y.Op != OpAMD64BTRQ { 20135 break 20136 } 20137 _ = y.Args[1] 20138 l := y.Args[0] 20139 if l.Op != OpAMD64MOVQload { 20140 break 20141 } 20142 if l.AuxInt != off { 20143 break 20144 } 20145 if l.Aux != sym { 20146 break 20147 } 20148 _ = l.Args[1] 20149 if ptr != l.Args[0] { 20150 break 20151 } 20152 mem := l.Args[1] 20153 x := y.Args[1] 20154 if mem != v.Args[2] { 20155 break 20156 } 20157 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 20158 break 20159 } 20160 v.reset(OpAMD64BTRQmodify) 20161 v.AuxInt = off 20162 v.Aux = sym 20163 v.AddArg(ptr) 20164 v.AddArg(x) 20165 v.AddArg(mem) 20166 return true 20167 } 20168 // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem) 20169 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 20170 // result: (BTSQmodify [off] {sym} ptr x mem) 20171 for { 20172 off := v.AuxInt 20173 sym := v.Aux 20174 _ = v.Args[2] 20175 ptr := v.Args[0] 20176 y := v.Args[1] 20177 if y.Op != OpAMD64BTSQ { 20178 break 20179 } 20180 _ = y.Args[1] 20181 l := y.Args[0] 20182 if l.Op != OpAMD64MOVQload { 20183 break 20184 } 20185 if l.AuxInt != off { 20186 break 20187 } 20188 if l.Aux != sym { 20189 break 20190 } 20191 _ = l.Args[1] 20192 if ptr != l.Args[0] { 20193 break 20194 } 20195 mem := l.Args[1] 20196 x := y.Args[1] 20197 if mem != v.Args[2] { 20198 break 20199 } 20200 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 20201 break 20202 } 20203 v.reset(OpAMD64BTSQmodify) 20204 v.AuxInt = off 20205 v.Aux = sym 20206 v.AddArg(ptr) 20207 v.AddArg(x) 20208 v.AddArg(mem) 20209 return true 20210 } 20211 // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 20212 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 20213 // result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 20214 for { 20215 off := v.AuxInt 20216 sym := v.Aux 20217 _ = v.Args[2] 20218 ptr := v.Args[0] 20219 a := v.Args[1] 20220 if a.Op != OpAMD64ADDQconst { 20221 break 20222 } 20223 c := a.AuxInt 20224 l := a.Args[0] 20225 if l.Op != OpAMD64MOVQload { 20226 break 20227 } 20228 if l.AuxInt != off { 20229 break 20230 } 20231 if l.Aux != sym { 20232 break 20233 } 20234 _ = l.Args[1] 20235 ptr2 := l.Args[0] 20236 mem := l.Args[1] 20237 if mem != v.Args[2] { 20238 break 20239 } 20240 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 20241 break 20242 } 20243 v.reset(OpAMD64ADDQconstmodify) 20244 v.AuxInt = makeValAndOff(c, off) 20245 v.Aux = sym 20246 v.AddArg(ptr) 20247 
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ORQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64XORQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
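	// NOTE (editorial sketch, not generated): the *constmodify rules pack
	// the immediate c and the address offset off into one AuxInt with
	// makeValAndOff(c, off); validValAndOff(c, off) guards that both fit.
	// A rough sketch of the packing, assuming the two 32-bit halves used
	// by ValAndOff:
	//
	//	func makeValAndOffSketch(val, off int64) int64 {
	//		return val<<32 | off&0xffffffff // Val in the high half, Off in the low half
	//	}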
	// match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTCQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTCQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTCQconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTRQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTRQconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore_30(v *Value) bool {
	// match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTSQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTSQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTSQconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
	// cond:
	// result: (MOVSDstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQf2i {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
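	// NOTE (editorial sketch, not generated): the rules above fold an
	// ADDQconst or LEAQ address computation into the Off half of the
	// store's ValAndOff. ValAndOff(sc).canAdd(off) must hold before
	// ValAndOff(sc).add(off) merges them; conceptually the check is
	//
	//	func canAddSketch(curOff, off int64) bool {
	//		sum := curOff + off
	//		return sum == int64(int32(sum)) // merged offset still fits in 32 bits
	//	}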
	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)
	// result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c2 := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = ValAndOff(c2).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(8*c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(8 * c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
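	// NOTE (editorial sketch, not generated): (SHLQconst [3] idx) is idx*8,
	// so an index that is already shifted left by 3 can use the hardware's
	// scaled addressing mode directly, e.g.
	//
	//	MOVQ AX, (DI)(SI*8) // MOVQstoreidx8
	//
	// instead of materializing idx<<3 and using a *1 index.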
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVQstore [i+c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVQstore [i+8*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + 8*c)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i + 8*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
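// NOTE (editorial sketch, not generated): x86-64 addressing-mode
// displacements are signed 32-bit fields, so every offset-folding rule in
// these functions is guarded by is32Bit on the combined constant. The guard
// amounts to a round trip through int32:
//
//	func is32BitSketch(n int64) bool {
//		return n == int64(int32(n))
//	}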
func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool {
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVSDload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
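// NOTE (editorial sketch, not generated): the (MOVSDload ... (MOVQstore ...))
// rule above forwards a just-stored integer straight into an XMM register
// (MOVQi2f) instead of bouncing through memory. This is the pattern behind
// bit-reinterpreting conversions such as
//
//	func bitsToFloat(u uint64) float64 {
//		return math.Float64frombits(u) // store+reload becomes a register move
//	}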
func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVSDload [i+8*c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + 8*c)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = i + 8*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool {
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// cond:
	// result: (MOVQstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVSDstore [i+c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVSDstore [i+8*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + 8*c)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = i + 8*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool {
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
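	// NOTE (editorial sketch, not generated): MOVSS moves 4 bytes, so its
	// indexed forms scale by 4 (LEAQ4, MOVSSloadidx4) where the MOVSD rules
	// use LEAQ8/MOVSDloadidx8. Indexing a []float32, as in
	//
	//	func load(s []float32, i int) float32 { return s[i] }
	//
	// can then address the element as (base)(idx*4) in one instruction.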
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVSSload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+4*c)
	// result: (MOVSSload [i+4*c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + 4*c)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = i + 4*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool {
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVSSstore [i+c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+4*c)
	// result: (MOVSSstore [i+4*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + 4*c)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = i + 4*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
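	// NOTE (editorial sketch, not generated): when a plain load feeds only a
	// sign extension (x.Uses == 1), the pair becomes a single sign-extending
	// load (MOVWQSXload), emitted in the load's own block via @x.Block.
	// For example,
	//
	//	func widen(p *int16) int64 { return int64(*p) }
	//
	// needs one sign-extending load rather than a load plus an extend.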
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
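// NOTE (editorial sketch, not generated): the (MOVWQSX (ANDLconst [c] x))
// rule above relies on c&0x8000 == 0: if the mask clears bit 15, the masked
// value's 16-bit sign bit is zero, so sign extension is the identity and the
// AND can be kept as ANDLconst [c & 0x7fff]. For instance,
// int64(int16(x&0xff)) is just x&0xff; no extension is needed.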
func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool {
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
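// Editorial note (illustrative, not part of the generated rules): the first
// MOVWQSXload rule is store-to-load forwarding. A sign-extending load that
// reads exactly the address a MOVWstore just wrote (same symbol, same offset,
// same pointer) can take the stored register value directly and sign-extend
// it, skipping the memory round trip. In rough source terms:
//
//	// *(*uint16)(p) = uint16(x) // MOVWstore
//	// y := int64(*(*int16)(p))  // MOVWQSXload from the same address
//	// becomes: y := int64(int16(x)) // MOVWQSX of the stored value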
func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x)
	// cond: zeroUpper48Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper48Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx2 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xffff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVWQZX x))
	// cond:
	// result: (MOVWQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
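// Editorial note (illustrative, not part of the generated rules): the
// zero-extension cases mirror the sign-extension ones, with one difference:
// MOVWQZX of a 16-bit load reuses the plain MOVWload, because narrow loads on
// amd64 already zero the upper bits of the destination register; and the
// zeroUpper48Bits rule drops the extension entirely when the upper 48 bits
// are already known to be zero. The ANDLconst form keeps the full 16-bit
// mask this time:
//
//	func movwqzx(x uint32) uint64 { return uint64(uint16(x)) }
//	// movwqzx(x&c) == uint64(x & c & 0xffff) for all x, c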
func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int64(read16(sym, off, config.BigEndian))])
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpSB {
			break
		}
		if !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(read16(sym, off, config.BigEndian))
		return true
	}
	return false
}
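// Editorial note (illustrative, not part of the generated rules): the final
// MOVWload rule folds loads from read-only data. If sym refers to a read-only
// symbol, the two bytes at sym+off are known at compile time, so the load
// becomes a MOVLconst; read16 consults the target byte order via
// config.BigEndian. A sketch of what such a helper computes in the
// little-endian case, assuming the symbol's bytes are available as a slice:
//
//	func read16le(data []byte, off int64) uint16 {
//		return uint16(data[off]) | uint16(data[off+1])<<8
//	}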
func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVWload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [i] {s} (MOVQconst [c]) p mem)
	// cond: is32Bit(i+c)
	// result: (MOVWload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		p := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+2*d)
	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 2*d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx2 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+2*c)
	// result: (MOVWload [i+2*c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(is32Bit(i + 2*c)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = i + 2*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
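// Editorial note (illustrative, not part of the generated rules): the
// MOVWloadidx1 rules retarget an index of the form idx<<1 to MOVWloadidx2, so
// the *2 scale is encoded in the addressing mode instead of a separate shift,
// and the idx2 rules then fold index constants with the scale applied
// (c+2*d). The same address identity as in the idx4 case holds:
//
//	func effAddrIdx2(ptr, c, idx int64) int64 { return ptr + c + 2*idx }
//	// effAddrIdx2(ptr, c, idx+d) == effAddrIdx2(ptr, c+2*d, idx)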
func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		if v_1.AuxInt != 16 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
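// Editorial note (illustrative, not part of the generated rules): the last
// MOVWstore rule above merges two adjacent 16-bit stores into one 32-bit
// store. On little-endian amd64, writing w's low half at p+i-2 and w>>16 at
// p+i lays down exactly the four bytes of a 32-bit store of w at p+i-2:
//
//	binary.LittleEndian.PutUint16(b[i-2:], uint16(w))
//	binary.LittleEndian.PutUint16(b[i:], uint16(w>>16))
//	// is equivalent to
//	binary.LittleEndian.PutUint32(b[i-2:], w)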
func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 16 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVWstore {
			break
		}
		if mem2.AuxInt != i-2 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVWload {
			break
		}
		if x2.AuxInt != j-2 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = j - 2
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool {
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		a := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		c := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
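// Editorial note (illustrative, not part of the generated rules): the two
// symmetric MOVWstoreconst pairing rules fuse adjacent 16-bit constant
// stores, whose offsets differ by exactly 2, into one 32-bit constant store,
// packing the value stored at the lower offset into the low half. For
// example, storing 0x1234 at off and 0x5678 at off+2 becomes a single
// MOVLstoreconst of 0x56781234 at off:
//
//	packed := a&0xffff | c<<16 // a at off, c at off+2 (little-endian)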
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(2*c)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(2 * c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(2 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx2 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 1
		v0.AddArg(i)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRLconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRLconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRLconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVWstore [i+c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+2*d)
	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 2*d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRLconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+2*c)
	// result: (MOVWstore [i+2*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(i + 2*c)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i + 2*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
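// Editorial note (illustrative, not part of the generated rules): merging two
// adjacent scale-2 16-bit stores produces a MOVLstoreidx1, whose index must
// be the byte offset idx*2, so the rewrites above materialize an explicit
// (SHLQconst [1] idx). The addresses agree because
// p + i + 2*idx == p + i + (idx<<1):
//
//	func addrIdx2(p, i, idx int64) int64 { return p + i + 2*idx }
//	func addrIdx1(p, i, idx int64) int64 { return p + i + idx }
//	// addrIdx2(p, i, idx) == addrIdx1(p, i, idx<<1)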
func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool {
	// match: (MULL x (MOVLconst [c]))
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULL (MOVLconst [c]) x)
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULLconst [c] (MULLconst [d] x))
	// cond:
	// result: (MULLconst [int64(int32(c * d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int64(int32(c * d))
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [-9] x)
	// cond:
	// result: (NEGL (LEAL8 <v.Type> x x))
	for {
		if v.AuxInt != -9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-5] x)
	// cond:
	// result: (NEGL (LEAL4 <v.Type> x x))
	for {
		if v.AuxInt != -5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-3] x)
	// cond:
	// result: (NEGL (LEAL2 <v.Type> x x))
	for {
		if v.AuxInt != -3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-1] x)
	// cond:
	// result: (NEGL x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 0] _)
	// cond:
	// result: (MOVLconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (MULLconst [ 1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 3] x)
	// cond:
	// result: (LEAL2 x x)
	for {
		if v.AuxInt != 3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 5] x)
	// cond:
	// result: (LEAL4 x x)
	for {
		if v.AuxInt != 5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 7] x)
	// cond:
	// result: (LEAL2 x (LEAL2 <v.Type> x x))
	for {
		if v.AuxInt != 7 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
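// Editorial note (illustrative, not part of the generated rules): the
// MULLconst rules strength-reduce small constant multipliers onto LEA shapes:
// LEAL2 x x computes x+2*x = 3x, LEAL4 x x = 5x, LEAL8 x x = 9x, and nesting
// covers 7, 11, 13, and so on; the negative cases wrap the LEA in a NEGL.
// For instance the [ 7] rule computes x + 2*(x + 2*x) = 7x with two LEAs and
// no multiply:
//
//	func leal2(a, b int32) int32 { return a + 2*b }
//	// leal2(x, leal2(x, x)) == x + 2*(x+2*x) == 7*x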
<v.Type> x x)) 25154 for { 25155 if v.AuxInt != 13 { 25156 break 25157 } 25158 x := v.Args[0] 25159 v.reset(OpAMD64LEAL4) 25160 v.AddArg(x) 25161 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25162 v0.AddArg(x) 25163 v0.AddArg(x) 25164 v.AddArg(v0) 25165 return true 25166 } 25167 // match: (MULLconst [19] x) 25168 // cond: 25169 // result: (LEAL2 x (LEAL8 <v.Type> x x)) 25170 for { 25171 if v.AuxInt != 19 { 25172 break 25173 } 25174 x := v.Args[0] 25175 v.reset(OpAMD64LEAL2) 25176 v.AddArg(x) 25177 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25178 v0.AddArg(x) 25179 v0.AddArg(x) 25180 v.AddArg(v0) 25181 return true 25182 } 25183 // match: (MULLconst [21] x) 25184 // cond: 25185 // result: (LEAL4 x (LEAL4 <v.Type> x x)) 25186 for { 25187 if v.AuxInt != 21 { 25188 break 25189 } 25190 x := v.Args[0] 25191 v.reset(OpAMD64LEAL4) 25192 v.AddArg(x) 25193 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25194 v0.AddArg(x) 25195 v0.AddArg(x) 25196 v.AddArg(v0) 25197 return true 25198 } 25199 // match: (MULLconst [25] x) 25200 // cond: 25201 // result: (LEAL8 x (LEAL2 <v.Type> x x)) 25202 for { 25203 if v.AuxInt != 25 { 25204 break 25205 } 25206 x := v.Args[0] 25207 v.reset(OpAMD64LEAL8) 25208 v.AddArg(x) 25209 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25210 v0.AddArg(x) 25211 v0.AddArg(x) 25212 v.AddArg(v0) 25213 return true 25214 } 25215 // match: (MULLconst [27] x) 25216 // cond: 25217 // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x)) 25218 for { 25219 if v.AuxInt != 27 { 25220 break 25221 } 25222 x := v.Args[0] 25223 v.reset(OpAMD64LEAL8) 25224 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25225 v0.AddArg(x) 25226 v0.AddArg(x) 25227 v.AddArg(v0) 25228 v1 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25229 v1.AddArg(x) 25230 v1.AddArg(x) 25231 v.AddArg(v1) 25232 return true 25233 } 25234 // match: (MULLconst [37] x) 25235 // cond: 25236 // result: (LEAL4 x (LEAL8 <v.Type> x x)) 25237 for { 25238 if v.AuxInt != 37 { 25239 break 25240 } 25241 x := v.Args[0] 25242 v.reset(OpAMD64LEAL4) 25243 v.AddArg(x) 25244 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25245 v0.AddArg(x) 25246 v0.AddArg(x) 25247 v.AddArg(v0) 25248 return true 25249 } 25250 // match: (MULLconst [41] x) 25251 // cond: 25252 // result: (LEAL8 x (LEAL4 <v.Type> x x)) 25253 for { 25254 if v.AuxInt != 41 { 25255 break 25256 } 25257 x := v.Args[0] 25258 v.reset(OpAMD64LEAL8) 25259 v.AddArg(x) 25260 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25261 v0.AddArg(x) 25262 v0.AddArg(x) 25263 v.AddArg(v0) 25264 return true 25265 } 25266 // match: (MULLconst [45] x) 25267 // cond: 25268 // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x)) 25269 for { 25270 if v.AuxInt != 45 { 25271 break 25272 } 25273 x := v.Args[0] 25274 v.reset(OpAMD64LEAL8) 25275 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25276 v0.AddArg(x) 25277 v0.AddArg(x) 25278 v.AddArg(v0) 25279 v1 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25280 v1.AddArg(x) 25281 v1.AddArg(x) 25282 v.AddArg(v1) 25283 return true 25284 } 25285 return false 25286 } 25287 func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool { 25288 b := v.Block 25289 _ = b 25290 // match: (MULLconst [73] x) 25291 // cond: 25292 // result: (LEAL8 x (LEAL8 <v.Type> x x)) 25293 for { 25294 if v.AuxInt != 73 { 25295 break 25296 } 25297 x := v.Args[0] 25298 v.reset(OpAMD64LEAL8) 25299 v.AddArg(x) 25300 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25301 v0.AddArg(x) 25302 v0.AddArg(x) 25303 v.AddArg(v0) 25304 return true 25305 } 25306 // match: (MULLconst [81] x) 25307 // cond: 25308 // 
result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x)) 25309 for { 25310 if v.AuxInt != 81 { 25311 break 25312 } 25313 x := v.Args[0] 25314 v.reset(OpAMD64LEAL8) 25315 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25316 v0.AddArg(x) 25317 v0.AddArg(x) 25318 v.AddArg(v0) 25319 v1 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25320 v1.AddArg(x) 25321 v1.AddArg(x) 25322 v.AddArg(v1) 25323 return true 25324 } 25325 // match: (MULLconst [c] x) 25326 // cond: isPowerOfTwo(c+1) && c >= 15 25327 // result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x) 25328 for { 25329 c := v.AuxInt 25330 x := v.Args[0] 25331 if !(isPowerOfTwo(c+1) && c >= 15) { 25332 break 25333 } 25334 v.reset(OpAMD64SUBL) 25335 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25336 v0.AuxInt = log2(c + 1) 25337 v0.AddArg(x) 25338 v.AddArg(v0) 25339 v.AddArg(x) 25340 return true 25341 } 25342 // match: (MULLconst [c] x) 25343 // cond: isPowerOfTwo(c-1) && c >= 17 25344 // result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x) 25345 for { 25346 c := v.AuxInt 25347 x := v.Args[0] 25348 if !(isPowerOfTwo(c-1) && c >= 17) { 25349 break 25350 } 25351 v.reset(OpAMD64LEAL1) 25352 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25353 v0.AuxInt = log2(c - 1) 25354 v0.AddArg(x) 25355 v.AddArg(v0) 25356 v.AddArg(x) 25357 return true 25358 } 25359 // match: (MULLconst [c] x) 25360 // cond: isPowerOfTwo(c-2) && c >= 34 25361 // result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x) 25362 for { 25363 c := v.AuxInt 25364 x := v.Args[0] 25365 if !(isPowerOfTwo(c-2) && c >= 34) { 25366 break 25367 } 25368 v.reset(OpAMD64LEAL2) 25369 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25370 v0.AuxInt = log2(c - 2) 25371 v0.AddArg(x) 25372 v.AddArg(v0) 25373 v.AddArg(x) 25374 return true 25375 } 25376 // match: (MULLconst [c] x) 25377 // cond: isPowerOfTwo(c-4) && c >= 68 25378 // result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x) 25379 for { 25380 c := v.AuxInt 25381 x := v.Args[0] 25382 if !(isPowerOfTwo(c-4) && c >= 68) { 25383 break 25384 } 25385 v.reset(OpAMD64LEAL4) 25386 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25387 v0.AuxInt = log2(c - 4) 25388 v0.AddArg(x) 25389 v.AddArg(v0) 25390 v.AddArg(x) 25391 return true 25392 } 25393 // match: (MULLconst [c] x) 25394 // cond: isPowerOfTwo(c-8) && c >= 136 25395 // result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x) 25396 for { 25397 c := v.AuxInt 25398 x := v.Args[0] 25399 if !(isPowerOfTwo(c-8) && c >= 136) { 25400 break 25401 } 25402 v.reset(OpAMD64LEAL8) 25403 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 25404 v0.AuxInt = log2(c - 8) 25405 v0.AddArg(x) 25406 v.AddArg(v0) 25407 v.AddArg(x) 25408 return true 25409 } 25410 // match: (MULLconst [c] x) 25411 // cond: c%3 == 0 && isPowerOfTwo(c/3) 25412 // result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x)) 25413 for { 25414 c := v.AuxInt 25415 x := v.Args[0] 25416 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 25417 break 25418 } 25419 v.reset(OpAMD64SHLLconst) 25420 v.AuxInt = log2(c / 3) 25421 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 25422 v0.AddArg(x) 25423 v0.AddArg(x) 25424 v.AddArg(v0) 25425 return true 25426 } 25427 // match: (MULLconst [c] x) 25428 // cond: c%5 == 0 && isPowerOfTwo(c/5) 25429 // result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x)) 25430 for { 25431 c := v.AuxInt 25432 x := v.Args[0] 25433 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 25434 break 25435 } 25436 v.reset(OpAMD64SHLLconst) 25437 v.AuxInt = log2(c / 5) 25438 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 25439 v0.AddArg(x) 25440 v0.AddArg(x) 25441 
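// Note: the MULLconst rules in this function strength-reduce a constant
// 32-bit multiply into address arithmetic. (LEAL2 x y), (LEAL4 x y) and
// (LEAL8 x y) compute x+2*y, x+4*y and x+8*y in one instruction, so
// c in {3,5,9} needs a single LEA, c = 2^n +/- {1,2,4,8} (for large enough
// c) needs one shift plus one LEA or SUB, and the c%3 / c%5 / c%9 rules
// combine both: for example c=24 takes the c%3 branch (24/3 = 8 is a power
// of two) and lowers to (SHLLconst [3] (LEAL2 x x)), i.e. (x+2*x)<<3 = 24*x.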
v.AddArg(v0) 25442 return true 25443 } 25444 // match: (MULLconst [c] x) 25445 // cond: c%9 == 0 && isPowerOfTwo(c/9) 25446 // result: (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x)) 25447 for { 25448 c := v.AuxInt 25449 x := v.Args[0] 25450 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 25451 break 25452 } 25453 v.reset(OpAMD64SHLLconst) 25454 v.AuxInt = log2(c / 9) 25455 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 25456 v0.AddArg(x) 25457 v0.AddArg(x) 25458 v.AddArg(v0) 25459 return true 25460 } 25461 return false 25462 } 25463 func rewriteValueAMD64_OpAMD64MULLconst_30(v *Value) bool { 25464 // match: (MULLconst [c] (MOVLconst [d])) 25465 // cond: 25466 // result: (MOVLconst [int64(int32(c*d))]) 25467 for { 25468 c := v.AuxInt 25469 v_0 := v.Args[0] 25470 if v_0.Op != OpAMD64MOVLconst { 25471 break 25472 } 25473 d := v_0.AuxInt 25474 v.reset(OpAMD64MOVLconst) 25475 v.AuxInt = int64(int32(c * d)) 25476 return true 25477 } 25478 return false 25479 } 25480 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { 25481 // match: (MULQ x (MOVQconst [c])) 25482 // cond: is32Bit(c) 25483 // result: (MULQconst [c] x) 25484 for { 25485 _ = v.Args[1] 25486 x := v.Args[0] 25487 v_1 := v.Args[1] 25488 if v_1.Op != OpAMD64MOVQconst { 25489 break 25490 } 25491 c := v_1.AuxInt 25492 if !(is32Bit(c)) { 25493 break 25494 } 25495 v.reset(OpAMD64MULQconst) 25496 v.AuxInt = c 25497 v.AddArg(x) 25498 return true 25499 } 25500 // match: (MULQ (MOVQconst [c]) x) 25501 // cond: is32Bit(c) 25502 // result: (MULQconst [c] x) 25503 for { 25504 _ = v.Args[1] 25505 v_0 := v.Args[0] 25506 if v_0.Op != OpAMD64MOVQconst { 25507 break 25508 } 25509 c := v_0.AuxInt 25510 x := v.Args[1] 25511 if !(is32Bit(c)) { 25512 break 25513 } 25514 v.reset(OpAMD64MULQconst) 25515 v.AuxInt = c 25516 v.AddArg(x) 25517 return true 25518 } 25519 return false 25520 } 25521 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { 25522 b := v.Block 25523 _ = b 25524 // match: (MULQconst [c] (MULQconst [d] x)) 25525 // cond: is32Bit(c*d) 25526 // result: (MULQconst [c * d] x) 25527 for { 25528 c := v.AuxInt 25529 v_0 := v.Args[0] 25530 if v_0.Op != OpAMD64MULQconst { 25531 break 25532 } 25533 d := v_0.AuxInt 25534 x := v_0.Args[0] 25535 if !(is32Bit(c * d)) { 25536 break 25537 } 25538 v.reset(OpAMD64MULQconst) 25539 v.AuxInt = c * d 25540 v.AddArg(x) 25541 return true 25542 } 25543 // match: (MULQconst [-9] x) 25544 // cond: 25545 // result: (NEGQ (LEAQ8 <v.Type> x x)) 25546 for { 25547 if v.AuxInt != -9 { 25548 break 25549 } 25550 x := v.Args[0] 25551 v.reset(OpAMD64NEGQ) 25552 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25553 v0.AddArg(x) 25554 v0.AddArg(x) 25555 v.AddArg(v0) 25556 return true 25557 } 25558 // match: (MULQconst [-5] x) 25559 // cond: 25560 // result: (NEGQ (LEAQ4 <v.Type> x x)) 25561 for { 25562 if v.AuxInt != -5 { 25563 break 25564 } 25565 x := v.Args[0] 25566 v.reset(OpAMD64NEGQ) 25567 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25568 v0.AddArg(x) 25569 v0.AddArg(x) 25570 v.AddArg(v0) 25571 return true 25572 } 25573 // match: (MULQconst [-3] x) 25574 // cond: 25575 // result: (NEGQ (LEAQ2 <v.Type> x x)) 25576 for { 25577 if v.AuxInt != -3 { 25578 break 25579 } 25580 x := v.Args[0] 25581 v.reset(OpAMD64NEGQ) 25582 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25583 v0.AddArg(x) 25584 v0.AddArg(x) 25585 v.AddArg(v0) 25586 return true 25587 } 25588 // match: (MULQconst [-1] x) 25589 // cond: 25590 // result: (NEGQ x) 25591 for { 25592 if v.AuxInt != -1 { 25593 break 25594 } 25595 x := v.Args[0] 25596 v.reset(OpAMD64NEGQ) 
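// The MULQconst rules in this and the following functions mirror the
// MULLconst ones for 64-bit values. Note the extra is32Bit guards (on MULQ
// above and on folding nested MULQconst): the quadword IMUL-with-immediate
// form encodes only a sign-extended 32-bit immediate, so only constants in
// the int32 range can live in a MULQconst AuxInt.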
25597 v.AddArg(x) 25598 return true 25599 } 25600 // match: (MULQconst [ 0] _) 25601 // cond: 25602 // result: (MOVQconst [0]) 25603 for { 25604 if v.AuxInt != 0 { 25605 break 25606 } 25607 v.reset(OpAMD64MOVQconst) 25608 v.AuxInt = 0 25609 return true 25610 } 25611 // match: (MULQconst [ 1] x) 25612 // cond: 25613 // result: x 25614 for { 25615 if v.AuxInt != 1 { 25616 break 25617 } 25618 x := v.Args[0] 25619 v.reset(OpCopy) 25620 v.Type = x.Type 25621 v.AddArg(x) 25622 return true 25623 } 25624 // match: (MULQconst [ 3] x) 25625 // cond: 25626 // result: (LEAQ2 x x) 25627 for { 25628 if v.AuxInt != 3 { 25629 break 25630 } 25631 x := v.Args[0] 25632 v.reset(OpAMD64LEAQ2) 25633 v.AddArg(x) 25634 v.AddArg(x) 25635 return true 25636 } 25637 // match: (MULQconst [ 5] x) 25638 // cond: 25639 // result: (LEAQ4 x x) 25640 for { 25641 if v.AuxInt != 5 { 25642 break 25643 } 25644 x := v.Args[0] 25645 v.reset(OpAMD64LEAQ4) 25646 v.AddArg(x) 25647 v.AddArg(x) 25648 return true 25649 } 25650 // match: (MULQconst [ 7] x) 25651 // cond: 25652 // result: (LEAQ2 x (LEAQ2 <v.Type> x x)) 25653 for { 25654 if v.AuxInt != 7 { 25655 break 25656 } 25657 x := v.Args[0] 25658 v.reset(OpAMD64LEAQ2) 25659 v.AddArg(x) 25660 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25661 v0.AddArg(x) 25662 v0.AddArg(x) 25663 v.AddArg(v0) 25664 return true 25665 } 25666 return false 25667 } 25668 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { 25669 b := v.Block 25670 _ = b 25671 // match: (MULQconst [ 9] x) 25672 // cond: 25673 // result: (LEAQ8 x x) 25674 for { 25675 if v.AuxInt != 9 { 25676 break 25677 } 25678 x := v.Args[0] 25679 v.reset(OpAMD64LEAQ8) 25680 v.AddArg(x) 25681 v.AddArg(x) 25682 return true 25683 } 25684 // match: (MULQconst [11] x) 25685 // cond: 25686 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 25687 for { 25688 if v.AuxInt != 11 { 25689 break 25690 } 25691 x := v.Args[0] 25692 v.reset(OpAMD64LEAQ2) 25693 v.AddArg(x) 25694 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25695 v0.AddArg(x) 25696 v0.AddArg(x) 25697 v.AddArg(v0) 25698 return true 25699 } 25700 // match: (MULQconst [13] x) 25701 // cond: 25702 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 25703 for { 25704 if v.AuxInt != 13 { 25705 break 25706 } 25707 x := v.Args[0] 25708 v.reset(OpAMD64LEAQ4) 25709 v.AddArg(x) 25710 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25711 v0.AddArg(x) 25712 v0.AddArg(x) 25713 v.AddArg(v0) 25714 return true 25715 } 25716 // match: (MULQconst [19] x) 25717 // cond: 25718 // result: (LEAQ2 x (LEAQ8 <v.Type> x x)) 25719 for { 25720 if v.AuxInt != 19 { 25721 break 25722 } 25723 x := v.Args[0] 25724 v.reset(OpAMD64LEAQ2) 25725 v.AddArg(x) 25726 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25727 v0.AddArg(x) 25728 v0.AddArg(x) 25729 v.AddArg(v0) 25730 return true 25731 } 25732 // match: (MULQconst [21] x) 25733 // cond: 25734 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 25735 for { 25736 if v.AuxInt != 21 { 25737 break 25738 } 25739 x := v.Args[0] 25740 v.reset(OpAMD64LEAQ4) 25741 v.AddArg(x) 25742 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25743 v0.AddArg(x) 25744 v0.AddArg(x) 25745 v.AddArg(v0) 25746 return true 25747 } 25748 // match: (MULQconst [25] x) 25749 // cond: 25750 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 25751 for { 25752 if v.AuxInt != 25 { 25753 break 25754 } 25755 x := v.Args[0] 25756 v.reset(OpAMD64LEAQ8) 25757 v.AddArg(x) 25758 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25759 v0.AddArg(x) 25760 v0.AddArg(x) 25761 v.AddArg(v0) 25762 return true 25763 } 25764 // match: (MULQconst [27] x) 25765 
// cond: 25766 // result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x)) 25767 for { 25768 if v.AuxInt != 27 { 25769 break 25770 } 25771 x := v.Args[0] 25772 v.reset(OpAMD64LEAQ8) 25773 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25774 v0.AddArg(x) 25775 v0.AddArg(x) 25776 v.AddArg(v0) 25777 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25778 v1.AddArg(x) 25779 v1.AddArg(x) 25780 v.AddArg(v1) 25781 return true 25782 } 25783 // match: (MULQconst [37] x) 25784 // cond: 25785 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 25786 for { 25787 if v.AuxInt != 37 { 25788 break 25789 } 25790 x := v.Args[0] 25791 v.reset(OpAMD64LEAQ4) 25792 v.AddArg(x) 25793 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25794 v0.AddArg(x) 25795 v0.AddArg(x) 25796 v.AddArg(v0) 25797 return true 25798 } 25799 // match: (MULQconst [41] x) 25800 // cond: 25801 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 25802 for { 25803 if v.AuxInt != 41 { 25804 break 25805 } 25806 x := v.Args[0] 25807 v.reset(OpAMD64LEAQ8) 25808 v.AddArg(x) 25809 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25810 v0.AddArg(x) 25811 v0.AddArg(x) 25812 v.AddArg(v0) 25813 return true 25814 } 25815 // match: (MULQconst [45] x) 25816 // cond: 25817 // result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x)) 25818 for { 25819 if v.AuxInt != 45 { 25820 break 25821 } 25822 x := v.Args[0] 25823 v.reset(OpAMD64LEAQ8) 25824 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25825 v0.AddArg(x) 25826 v0.AddArg(x) 25827 v.AddArg(v0) 25828 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25829 v1.AddArg(x) 25830 v1.AddArg(x) 25831 v.AddArg(v1) 25832 return true 25833 } 25834 return false 25835 } 25836 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { 25837 b := v.Block 25838 _ = b 25839 // match: (MULQconst [73] x) 25840 // cond: 25841 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 25842 for { 25843 if v.AuxInt != 73 { 25844 break 25845 } 25846 x := v.Args[0] 25847 v.reset(OpAMD64LEAQ8) 25848 v.AddArg(x) 25849 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25850 v0.AddArg(x) 25851 v0.AddArg(x) 25852 v.AddArg(v0) 25853 return true 25854 } 25855 // match: (MULQconst [81] x) 25856 // cond: 25857 // result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x)) 25858 for { 25859 if v.AuxInt != 81 { 25860 break 25861 } 25862 x := v.Args[0] 25863 v.reset(OpAMD64LEAQ8) 25864 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25865 v0.AddArg(x) 25866 v0.AddArg(x) 25867 v.AddArg(v0) 25868 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 25869 v1.AddArg(x) 25870 v1.AddArg(x) 25871 v.AddArg(v1) 25872 return true 25873 } 25874 // match: (MULQconst [c] x) 25875 // cond: isPowerOfTwo(c+1) && c >= 15 25876 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 25877 for { 25878 c := v.AuxInt 25879 x := v.Args[0] 25880 if !(isPowerOfTwo(c+1) && c >= 15) { 25881 break 25882 } 25883 v.reset(OpAMD64SUBQ) 25884 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25885 v0.AuxInt = log2(c + 1) 25886 v0.AddArg(x) 25887 v.AddArg(v0) 25888 v.AddArg(x) 25889 return true 25890 } 25891 // match: (MULQconst [c] x) 25892 // cond: isPowerOfTwo(c-1) && c >= 17 25893 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 25894 for { 25895 c := v.AuxInt 25896 x := v.Args[0] 25897 if !(isPowerOfTwo(c-1) && c >= 17) { 25898 break 25899 } 25900 v.reset(OpAMD64LEAQ1) 25901 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25902 v0.AuxInt = log2(c - 1) 25903 v0.AddArg(x) 25904 v.AddArg(v0) 25905 v.AddArg(x) 25906 return true 25907 } 25908 // match: (MULQconst [c] x) 25909 // cond: isPowerOfTwo(c-2) && c >= 34 
25910 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 25911 for { 25912 c := v.AuxInt 25913 x := v.Args[0] 25914 if !(isPowerOfTwo(c-2) && c >= 34) { 25915 break 25916 } 25917 v.reset(OpAMD64LEAQ2) 25918 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25919 v0.AuxInt = log2(c - 2) 25920 v0.AddArg(x) 25921 v.AddArg(v0) 25922 v.AddArg(x) 25923 return true 25924 } 25925 // match: (MULQconst [c] x) 25926 // cond: isPowerOfTwo(c-4) && c >= 68 25927 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 25928 for { 25929 c := v.AuxInt 25930 x := v.Args[0] 25931 if !(isPowerOfTwo(c-4) && c >= 68) { 25932 break 25933 } 25934 v.reset(OpAMD64LEAQ4) 25935 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25936 v0.AuxInt = log2(c - 4) 25937 v0.AddArg(x) 25938 v.AddArg(v0) 25939 v.AddArg(x) 25940 return true 25941 } 25942 // match: (MULQconst [c] x) 25943 // cond: isPowerOfTwo(c-8) && c >= 136 25944 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 25945 for { 25946 c := v.AuxInt 25947 x := v.Args[0] 25948 if !(isPowerOfTwo(c-8) && c >= 136) { 25949 break 25950 } 25951 v.reset(OpAMD64LEAQ8) 25952 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25953 v0.AuxInt = log2(c - 8) 25954 v0.AddArg(x) 25955 v.AddArg(v0) 25956 v.AddArg(x) 25957 return true 25958 } 25959 // match: (MULQconst [c] x) 25960 // cond: c%3 == 0 && isPowerOfTwo(c/3) 25961 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 25962 for { 25963 c := v.AuxInt 25964 x := v.Args[0] 25965 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 25966 break 25967 } 25968 v.reset(OpAMD64SHLQconst) 25969 v.AuxInt = log2(c / 3) 25970 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 25971 v0.AddArg(x) 25972 v0.AddArg(x) 25973 v.AddArg(v0) 25974 return true 25975 } 25976 // match: (MULQconst [c] x) 25977 // cond: c%5 == 0 && isPowerOfTwo(c/5) 25978 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 25979 for { 25980 c := v.AuxInt 25981 x := v.Args[0] 25982 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 25983 break 25984 } 25985 v.reset(OpAMD64SHLQconst) 25986 v.AuxInt = log2(c / 5) 25987 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 25988 v0.AddArg(x) 25989 v0.AddArg(x) 25990 v.AddArg(v0) 25991 return true 25992 } 25993 // match: (MULQconst [c] x) 25994 // cond: c%9 == 0 && isPowerOfTwo(c/9) 25995 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 25996 for { 25997 c := v.AuxInt 25998 x := v.Args[0] 25999 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 26000 break 26001 } 26002 v.reset(OpAMD64SHLQconst) 26003 v.AuxInt = log2(c / 9) 26004 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 26005 v0.AddArg(x) 26006 v0.AddArg(x) 26007 v.AddArg(v0) 26008 return true 26009 } 26010 return false 26011 } 26012 func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool { 26013 // match: (MULQconst [c] (MOVQconst [d])) 26014 // cond: 26015 // result: (MOVQconst [c*d]) 26016 for { 26017 c := v.AuxInt 26018 v_0 := v.Args[0] 26019 if v_0.Op != OpAMD64MOVQconst { 26020 break 26021 } 26022 d := v_0.AuxInt 26023 v.reset(OpAMD64MOVQconst) 26024 v.AuxInt = c * d 26025 return true 26026 } 26027 return false 26028 } 26029 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 26030 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) 26031 // cond: canMergeLoad(v, l, x) && clobber(l) 26032 // result: (MULSDload x [off] {sym} ptr mem) 26033 for { 26034 _ = v.Args[1] 26035 x := v.Args[0] 26036 l := v.Args[1] 26037 if l.Op != OpAMD64MOVSDload { 26038 break 26039 } 26040 off := l.AuxInt 26041 sym := l.Aux 26042 _ = l.Args[1] 26043 ptr := l.Args[0] 26044 mem := l.Args[1] 26045 if 
!(canMergeLoad(v, l, x) && clobber(l)) { 26046 break 26047 } 26048 v.reset(OpAMD64MULSDload) 26049 v.AuxInt = off 26050 v.Aux = sym 26051 v.AddArg(x) 26052 v.AddArg(ptr) 26053 v.AddArg(mem) 26054 return true 26055 } 26056 // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) 26057 // cond: canMergeLoad(v, l, x) && clobber(l) 26058 // result: (MULSDload x [off] {sym} ptr mem) 26059 for { 26060 _ = v.Args[1] 26061 l := v.Args[0] 26062 if l.Op != OpAMD64MOVSDload { 26063 break 26064 } 26065 off := l.AuxInt 26066 sym := l.Aux 26067 _ = l.Args[1] 26068 ptr := l.Args[0] 26069 mem := l.Args[1] 26070 x := v.Args[1] 26071 if !(canMergeLoad(v, l, x) && clobber(l)) { 26072 break 26073 } 26074 v.reset(OpAMD64MULSDload) 26075 v.AuxInt = off 26076 v.Aux = sym 26077 v.AddArg(x) 26078 v.AddArg(ptr) 26079 v.AddArg(mem) 26080 return true 26081 } 26082 return false 26083 } 26084 func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { 26085 b := v.Block 26086 _ = b 26087 typ := &b.Func.Config.Types 26088 _ = typ 26089 // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem) 26090 // cond: is32Bit(off1+off2) 26091 // result: (MULSDload [off1+off2] {sym} val base mem) 26092 for { 26093 off1 := v.AuxInt 26094 sym := v.Aux 26095 _ = v.Args[2] 26096 val := v.Args[0] 26097 v_1 := v.Args[1] 26098 if v_1.Op != OpAMD64ADDQconst { 26099 break 26100 } 26101 off2 := v_1.AuxInt 26102 base := v_1.Args[0] 26103 mem := v.Args[2] 26104 if !(is32Bit(off1 + off2)) { 26105 break 26106 } 26107 v.reset(OpAMD64MULSDload) 26108 v.AuxInt = off1 + off2 26109 v.Aux = sym 26110 v.AddArg(val) 26111 v.AddArg(base) 26112 v.AddArg(mem) 26113 return true 26114 } 26115 // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 26116 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 26117 // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 26118 for { 26119 off1 := v.AuxInt 26120 sym1 := v.Aux 26121 _ = v.Args[2] 26122 val := v.Args[0] 26123 v_1 := v.Args[1] 26124 if v_1.Op != OpAMD64LEAQ { 26125 break 26126 } 26127 off2 := v_1.AuxInt 26128 sym2 := v_1.Aux 26129 base := v_1.Args[0] 26130 mem := v.Args[2] 26131 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 26132 break 26133 } 26134 v.reset(OpAMD64MULSDload) 26135 v.AuxInt = off1 + off2 26136 v.Aux = mergeSym(sym1, sym2) 26137 v.AddArg(val) 26138 v.AddArg(base) 26139 v.AddArg(mem) 26140 return true 26141 } 26142 // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 26143 // cond: 26144 // result: (MULSD x (MOVQi2f y)) 26145 for { 26146 off := v.AuxInt 26147 sym := v.Aux 26148 _ = v.Args[2] 26149 x := v.Args[0] 26150 ptr := v.Args[1] 26151 v_2 := v.Args[2] 26152 if v_2.Op != OpAMD64MOVQstore { 26153 break 26154 } 26155 if v_2.AuxInt != off { 26156 break 26157 } 26158 if v_2.Aux != sym { 26159 break 26160 } 26161 _ = v_2.Args[2] 26162 if ptr != v_2.Args[0] { 26163 break 26164 } 26165 y := v_2.Args[1] 26166 v.reset(OpAMD64MULSD) 26167 v.AddArg(x) 26168 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 26169 v0.AddArg(y) 26170 v.AddArg(v0) 26171 return true 26172 } 26173 return false 26174 } 26175 func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { 26176 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) 26177 // cond: canMergeLoad(v, l, x) && clobber(l) 26178 // result: (MULSSload x [off] {sym} ptr mem) 26179 for { 26180 _ = v.Args[1] 26181 x := v.Args[0] 26182 l := v.Args[1] 26183 if l.Op != OpAMD64MOVSSload { 26184 break 26185 } 26186 off := l.AuxInt 26187 sym := l.Aux 26188 _ = l.Args[1] 26189 ptr := l.Args[0] 
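// Like MULSD above, MULSS folds a float32 load directly into the multiply
// (MULSSload) when canMergeLoad proves the load is only used here and
// clobber can kill it. The MULSDload/MULSSload rules then fold constant
// offsets (ADDQconst) and symbols (LEAQ) into the addressing mode, and the
// MOVQstore/MOVLstore patterns forward a just-stored integer bit pattern
// straight into the multiply via MOVQi2f/MOVLi2f rather than reloading it
// through memory.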
26190 mem := l.Args[1] 26191 if !(canMergeLoad(v, l, x) && clobber(l)) { 26192 break 26193 } 26194 v.reset(OpAMD64MULSSload) 26195 v.AuxInt = off 26196 v.Aux = sym 26197 v.AddArg(x) 26198 v.AddArg(ptr) 26199 v.AddArg(mem) 26200 return true 26201 } 26202 // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) 26203 // cond: canMergeLoad(v, l, x) && clobber(l) 26204 // result: (MULSSload x [off] {sym} ptr mem) 26205 for { 26206 _ = v.Args[1] 26207 l := v.Args[0] 26208 if l.Op != OpAMD64MOVSSload { 26209 break 26210 } 26211 off := l.AuxInt 26212 sym := l.Aux 26213 _ = l.Args[1] 26214 ptr := l.Args[0] 26215 mem := l.Args[1] 26216 x := v.Args[1] 26217 if !(canMergeLoad(v, l, x) && clobber(l)) { 26218 break 26219 } 26220 v.reset(OpAMD64MULSSload) 26221 v.AuxInt = off 26222 v.Aux = sym 26223 v.AddArg(x) 26224 v.AddArg(ptr) 26225 v.AddArg(mem) 26226 return true 26227 } 26228 return false 26229 } 26230 func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool { 26231 b := v.Block 26232 _ = b 26233 typ := &b.Func.Config.Types 26234 _ = typ 26235 // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem) 26236 // cond: is32Bit(off1+off2) 26237 // result: (MULSSload [off1+off2] {sym} val base mem) 26238 for { 26239 off1 := v.AuxInt 26240 sym := v.Aux 26241 _ = v.Args[2] 26242 val := v.Args[0] 26243 v_1 := v.Args[1] 26244 if v_1.Op != OpAMD64ADDQconst { 26245 break 26246 } 26247 off2 := v_1.AuxInt 26248 base := v_1.Args[0] 26249 mem := v.Args[2] 26250 if !(is32Bit(off1 + off2)) { 26251 break 26252 } 26253 v.reset(OpAMD64MULSSload) 26254 v.AuxInt = off1 + off2 26255 v.Aux = sym 26256 v.AddArg(val) 26257 v.AddArg(base) 26258 v.AddArg(mem) 26259 return true 26260 } 26261 // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 26262 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 26263 // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 26264 for { 26265 off1 := v.AuxInt 26266 sym1 := v.Aux 26267 _ = v.Args[2] 26268 val := v.Args[0] 26269 v_1 := v.Args[1] 26270 if v_1.Op != OpAMD64LEAQ { 26271 break 26272 } 26273 off2 := v_1.AuxInt 26274 sym2 := v_1.Aux 26275 base := v_1.Args[0] 26276 mem := v.Args[2] 26277 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 26278 break 26279 } 26280 v.reset(OpAMD64MULSSload) 26281 v.AuxInt = off1 + off2 26282 v.Aux = mergeSym(sym1, sym2) 26283 v.AddArg(val) 26284 v.AddArg(base) 26285 v.AddArg(mem) 26286 return true 26287 } 26288 // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 26289 // cond: 26290 // result: (MULSS x (MOVLi2f y)) 26291 for { 26292 off := v.AuxInt 26293 sym := v.Aux 26294 _ = v.Args[2] 26295 x := v.Args[0] 26296 ptr := v.Args[1] 26297 v_2 := v.Args[2] 26298 if v_2.Op != OpAMD64MOVLstore { 26299 break 26300 } 26301 if v_2.AuxInt != off { 26302 break 26303 } 26304 if v_2.Aux != sym { 26305 break 26306 } 26307 _ = v_2.Args[2] 26308 if ptr != v_2.Args[0] { 26309 break 26310 } 26311 y := v_2.Args[1] 26312 v.reset(OpAMD64MULSS) 26313 v.AddArg(x) 26314 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 26315 v0.AddArg(y) 26316 v.AddArg(v0) 26317 return true 26318 } 26319 return false 26320 } 26321 func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { 26322 // match: (NEGL (NEGL x)) 26323 // cond: 26324 // result: x 26325 for { 26326 v_0 := v.Args[0] 26327 if v_0.Op != OpAMD64NEGL { 26328 break 26329 } 26330 x := v_0.Args[0] 26331 v.reset(OpCopy) 26332 v.Type = x.Type 26333 v.AddArg(x) 26334 return true 26335 } 26336 // match: (NEGL (MOVLconst [c])) 26337 // cond: 26338 // result: (MOVLconst 
[int64(int32(-c))]) 26339 for { 26340 v_0 := v.Args[0] 26341 if v_0.Op != OpAMD64MOVLconst { 26342 break 26343 } 26344 c := v_0.AuxInt 26345 v.reset(OpAMD64MOVLconst) 26346 v.AuxInt = int64(int32(-c)) 26347 return true 26348 } 26349 return false 26350 } 26351 func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { 26352 // match: (NEGQ (NEGQ x)) 26353 // cond: 26354 // result: x 26355 for { 26356 v_0 := v.Args[0] 26357 if v_0.Op != OpAMD64NEGQ { 26358 break 26359 } 26360 x := v_0.Args[0] 26361 v.reset(OpCopy) 26362 v.Type = x.Type 26363 v.AddArg(x) 26364 return true 26365 } 26366 // match: (NEGQ (MOVQconst [c])) 26367 // cond: 26368 // result: (MOVQconst [-c]) 26369 for { 26370 v_0 := v.Args[0] 26371 if v_0.Op != OpAMD64MOVQconst { 26372 break 26373 } 26374 c := v_0.AuxInt 26375 v.reset(OpAMD64MOVQconst) 26376 v.AuxInt = -c 26377 return true 26378 } 26379 // match: (NEGQ (ADDQconst [c] (NEGQ x))) 26380 // cond: c != -(1<<31) 26381 // result: (ADDQconst [-c] x) 26382 for { 26383 v_0 := v.Args[0] 26384 if v_0.Op != OpAMD64ADDQconst { 26385 break 26386 } 26387 c := v_0.AuxInt 26388 v_0_0 := v_0.Args[0] 26389 if v_0_0.Op != OpAMD64NEGQ { 26390 break 26391 } 26392 x := v_0_0.Args[0] 26393 if !(c != -(1 << 31)) { 26394 break 26395 } 26396 v.reset(OpAMD64ADDQconst) 26397 v.AuxInt = -c 26398 v.AddArg(x) 26399 return true 26400 } 26401 return false 26402 } 26403 func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool { 26404 // match: (NOTL (MOVLconst [c])) 26405 // cond: 26406 // result: (MOVLconst [^c]) 26407 for { 26408 v_0 := v.Args[0] 26409 if v_0.Op != OpAMD64MOVLconst { 26410 break 26411 } 26412 c := v_0.AuxInt 26413 v.reset(OpAMD64MOVLconst) 26414 v.AuxInt = ^c 26415 return true 26416 } 26417 return false 26418 } 26419 func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool { 26420 // match: (NOTQ (MOVQconst [c])) 26421 // cond: 26422 // result: (MOVQconst [^c]) 26423 for { 26424 v_0 := v.Args[0] 26425 if v_0.Op != OpAMD64MOVQconst { 26426 break 26427 } 26428 c := v_0.AuxInt 26429 v.reset(OpAMD64MOVQconst) 26430 v.AuxInt = ^c 26431 return true 26432 } 26433 return false 26434 } 26435 func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { 26436 b := v.Block 26437 _ = b 26438 config := b.Func.Config 26439 _ = config 26440 // match: (ORL (SHLL (MOVLconst [1]) y) x) 26441 // cond: !config.nacl 26442 // result: (BTSL x y) 26443 for { 26444 _ = v.Args[1] 26445 v_0 := v.Args[0] 26446 if v_0.Op != OpAMD64SHLL { 26447 break 26448 } 26449 _ = v_0.Args[1] 26450 v_0_0 := v_0.Args[0] 26451 if v_0_0.Op != OpAMD64MOVLconst { 26452 break 26453 } 26454 if v_0_0.AuxInt != 1 { 26455 break 26456 } 26457 y := v_0.Args[1] 26458 x := v.Args[1] 26459 if !(!config.nacl) { 26460 break 26461 } 26462 v.reset(OpAMD64BTSL) 26463 v.AddArg(x) 26464 v.AddArg(y) 26465 return true 26466 } 26467 // match: (ORL x (SHLL (MOVLconst [1]) y)) 26468 // cond: !config.nacl 26469 // result: (BTSL x y) 26470 for { 26471 _ = v.Args[1] 26472 x := v.Args[0] 26473 v_1 := v.Args[1] 26474 if v_1.Op != OpAMD64SHLL { 26475 break 26476 } 26477 _ = v_1.Args[1] 26478 v_1_0 := v_1.Args[0] 26479 if v_1_0.Op != OpAMD64MOVLconst { 26480 break 26481 } 26482 if v_1_0.AuxInt != 1 { 26483 break 26484 } 26485 y := v_1.Args[1] 26486 if !(!config.nacl) { 26487 break 26488 } 26489 v.reset(OpAMD64BTSL) 26490 v.AddArg(x) 26491 v.AddArg(y) 26492 return true 26493 } 26494 // match: (ORL (MOVLconst [c]) x) 26495 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 26496 // result: (BTSLconst [log2uint32(c)] x) 26497 for { 26498 _ = v.Args[1] 26499 v_0 := 
v.Args[0] 26500 if v_0.Op != OpAMD64MOVLconst { 26501 break 26502 } 26503 c := v_0.AuxInt 26504 x := v.Args[1] 26505 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 26506 break 26507 } 26508 v.reset(OpAMD64BTSLconst) 26509 v.AuxInt = log2uint32(c) 26510 v.AddArg(x) 26511 return true 26512 } 26513 // match: (ORL x (MOVLconst [c])) 26514 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 26515 // result: (BTSLconst [log2uint32(c)] x) 26516 for { 26517 _ = v.Args[1] 26518 x := v.Args[0] 26519 v_1 := v.Args[1] 26520 if v_1.Op != OpAMD64MOVLconst { 26521 break 26522 } 26523 c := v_1.AuxInt 26524 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 26525 break 26526 } 26527 v.reset(OpAMD64BTSLconst) 26528 v.AuxInt = log2uint32(c) 26529 v.AddArg(x) 26530 return true 26531 } 26532 // match: (ORL x (MOVLconst [c])) 26533 // cond: 26534 // result: (ORLconst [c] x) 26535 for { 26536 _ = v.Args[1] 26537 x := v.Args[0] 26538 v_1 := v.Args[1] 26539 if v_1.Op != OpAMD64MOVLconst { 26540 break 26541 } 26542 c := v_1.AuxInt 26543 v.reset(OpAMD64ORLconst) 26544 v.AuxInt = c 26545 v.AddArg(x) 26546 return true 26547 } 26548 // match: (ORL (MOVLconst [c]) x) 26549 // cond: 26550 // result: (ORLconst [c] x) 26551 for { 26552 _ = v.Args[1] 26553 v_0 := v.Args[0] 26554 if v_0.Op != OpAMD64MOVLconst { 26555 break 26556 } 26557 c := v_0.AuxInt 26558 x := v.Args[1] 26559 v.reset(OpAMD64ORLconst) 26560 v.AuxInt = c 26561 v.AddArg(x) 26562 return true 26563 } 26564 // match: (ORL (SHLLconst x [c]) (SHRLconst x [d])) 26565 // cond: d==32-c 26566 // result: (ROLLconst x [c]) 26567 for { 26568 _ = v.Args[1] 26569 v_0 := v.Args[0] 26570 if v_0.Op != OpAMD64SHLLconst { 26571 break 26572 } 26573 c := v_0.AuxInt 26574 x := v_0.Args[0] 26575 v_1 := v.Args[1] 26576 if v_1.Op != OpAMD64SHRLconst { 26577 break 26578 } 26579 d := v_1.AuxInt 26580 if x != v_1.Args[0] { 26581 break 26582 } 26583 if !(d == 32-c) { 26584 break 26585 } 26586 v.reset(OpAMD64ROLLconst) 26587 v.AuxInt = c 26588 v.AddArg(x) 26589 return true 26590 } 26591 // match: (ORL (SHRLconst x [d]) (SHLLconst x [c])) 26592 // cond: d==32-c 26593 // result: (ROLLconst x [c]) 26594 for { 26595 _ = v.Args[1] 26596 v_0 := v.Args[0] 26597 if v_0.Op != OpAMD64SHRLconst { 26598 break 26599 } 26600 d := v_0.AuxInt 26601 x := v_0.Args[0] 26602 v_1 := v.Args[1] 26603 if v_1.Op != OpAMD64SHLLconst { 26604 break 26605 } 26606 c := v_1.AuxInt 26607 if x != v_1.Args[0] { 26608 break 26609 } 26610 if !(d == 32-c) { 26611 break 26612 } 26613 v.reset(OpAMD64ROLLconst) 26614 v.AuxInt = c 26615 v.AddArg(x) 26616 return true 26617 } 26618 // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 26619 // cond: d==16-c && c < 16 && t.Size() == 2 26620 // result: (ROLWconst x [c]) 26621 for { 26622 t := v.Type 26623 _ = v.Args[1] 26624 v_0 := v.Args[0] 26625 if v_0.Op != OpAMD64SHLLconst { 26626 break 26627 } 26628 c := v_0.AuxInt 26629 x := v_0.Args[0] 26630 v_1 := v.Args[1] 26631 if v_1.Op != OpAMD64SHRWconst { 26632 break 26633 } 26634 d := v_1.AuxInt 26635 if x != v_1.Args[0] { 26636 break 26637 } 26638 if !(d == 16-c && c < 16 && t.Size() == 2) { 26639 break 26640 } 26641 v.reset(OpAMD64ROLWconst) 26642 v.AuxInt = c 26643 v.AddArg(x) 26644 return true 26645 } 26646 // match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 26647 // cond: d==16-c && c < 16 && t.Size() == 2 26648 // result: (ROLWconst x [c]) 26649 for { 26650 t := v.Type 26651 _ = v.Args[1] 26652 v_0 := v.Args[0] 26653 if v_0.Op != OpAMD64SHRWconst { 26654 break 26655 } 26656 
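// Two ORL rule families appear above. First, OR with a single set bit
// becomes bit-test-and-set (BTSL / BTSLconst); the uint64(c) >= 128 guard
// leaves small masks as ORLconst, presumably because they still fit OR's
// sign-extended 8-bit immediate and BTS would not shrink the encoding.
// Second, an OR of complementary constant shifts of the same value is a
// rotate: (SHLLconst x [c]) | (SHRLconst x [32-c]) becomes
// (ROLLconst x [c]), with 16- and 8-bit variants keyed off t.Size().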
d := v_0.AuxInt 26657 x := v_0.Args[0] 26658 v_1 := v.Args[1] 26659 if v_1.Op != OpAMD64SHLLconst { 26660 break 26661 } 26662 c := v_1.AuxInt 26663 if x != v_1.Args[0] { 26664 break 26665 } 26666 if !(d == 16-c && c < 16 && t.Size() == 2) { 26667 break 26668 } 26669 v.reset(OpAMD64ROLWconst) 26670 v.AuxInt = c 26671 v.AddArg(x) 26672 return true 26673 } 26674 return false 26675 } 26676 func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { 26677 // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 26678 // cond: d==8-c && c < 8 && t.Size() == 1 26679 // result: (ROLBconst x [c]) 26680 for { 26681 t := v.Type 26682 _ = v.Args[1] 26683 v_0 := v.Args[0] 26684 if v_0.Op != OpAMD64SHLLconst { 26685 break 26686 } 26687 c := v_0.AuxInt 26688 x := v_0.Args[0] 26689 v_1 := v.Args[1] 26690 if v_1.Op != OpAMD64SHRBconst { 26691 break 26692 } 26693 d := v_1.AuxInt 26694 if x != v_1.Args[0] { 26695 break 26696 } 26697 if !(d == 8-c && c < 8 && t.Size() == 1) { 26698 break 26699 } 26700 v.reset(OpAMD64ROLBconst) 26701 v.AuxInt = c 26702 v.AddArg(x) 26703 return true 26704 } 26705 // match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 26706 // cond: d==8-c && c < 8 && t.Size() == 1 26707 // result: (ROLBconst x [c]) 26708 for { 26709 t := v.Type 26710 _ = v.Args[1] 26711 v_0 := v.Args[0] 26712 if v_0.Op != OpAMD64SHRBconst { 26713 break 26714 } 26715 d := v_0.AuxInt 26716 x := v_0.Args[0] 26717 v_1 := v.Args[1] 26718 if v_1.Op != OpAMD64SHLLconst { 26719 break 26720 } 26721 c := v_1.AuxInt 26722 if x != v_1.Args[0] { 26723 break 26724 } 26725 if !(d == 8-c && c < 8 && t.Size() == 1) { 26726 break 26727 } 26728 v.reset(OpAMD64ROLBconst) 26729 v.AuxInt = c 26730 v.AddArg(x) 26731 return true 26732 } 26733 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 26734 // cond: 26735 // result: (ROLL x y) 26736 for { 26737 _ = v.Args[1] 26738 v_0 := v.Args[0] 26739 if v_0.Op != OpAMD64SHLL { 26740 break 26741 } 26742 _ = v_0.Args[1] 26743 x := v_0.Args[0] 26744 y := v_0.Args[1] 26745 v_1 := v.Args[1] 26746 if v_1.Op != OpAMD64ANDL { 26747 break 26748 } 26749 _ = v_1.Args[1] 26750 v_1_0 := v_1.Args[0] 26751 if v_1_0.Op != OpAMD64SHRL { 26752 break 26753 } 26754 _ = v_1_0.Args[1] 26755 if x != v_1_0.Args[0] { 26756 break 26757 } 26758 v_1_0_1 := v_1_0.Args[1] 26759 if v_1_0_1.Op != OpAMD64NEGQ { 26760 break 26761 } 26762 if y != v_1_0_1.Args[0] { 26763 break 26764 } 26765 v_1_1 := v_1.Args[1] 26766 if v_1_1.Op != OpAMD64SBBLcarrymask { 26767 break 26768 } 26769 v_1_1_0 := v_1_1.Args[0] 26770 if v_1_1_0.Op != OpAMD64CMPQconst { 26771 break 26772 } 26773 if v_1_1_0.AuxInt != 32 { 26774 break 26775 } 26776 v_1_1_0_0 := v_1_1_0.Args[0] 26777 if v_1_1_0_0.Op != OpAMD64NEGQ { 26778 break 26779 } 26780 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 26781 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 26782 break 26783 } 26784 if v_1_1_0_0_0.AuxInt != -32 { 26785 break 26786 } 26787 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 26788 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 26789 break 26790 } 26791 if v_1_1_0_0_0_0.AuxInt != 31 { 26792 break 26793 } 26794 if y != v_1_1_0_0_0_0.Args[0] { 26795 break 26796 } 26797 v.reset(OpAMD64ROLL) 26798 v.AddArg(x) 26799 v.AddArg(y) 26800 return true 26801 } 26802 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y)))) 26803 // cond: 26804 // result: (ROLL x y) 26805 for { 26806 _ = v.Args[1] 26807 v_0 := v.Args[0] 26808 if v_0.Op != OpAMD64SHLL { 26809 break 26810 
} 26811 _ = v_0.Args[1] 26812 x := v_0.Args[0] 26813 y := v_0.Args[1] 26814 v_1 := v.Args[1] 26815 if v_1.Op != OpAMD64ANDL { 26816 break 26817 } 26818 _ = v_1.Args[1] 26819 v_1_0 := v_1.Args[0] 26820 if v_1_0.Op != OpAMD64SBBLcarrymask { 26821 break 26822 } 26823 v_1_0_0 := v_1_0.Args[0] 26824 if v_1_0_0.Op != OpAMD64CMPQconst { 26825 break 26826 } 26827 if v_1_0_0.AuxInt != 32 { 26828 break 26829 } 26830 v_1_0_0_0 := v_1_0_0.Args[0] 26831 if v_1_0_0_0.Op != OpAMD64NEGQ { 26832 break 26833 } 26834 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 26835 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 26836 break 26837 } 26838 if v_1_0_0_0_0.AuxInt != -32 { 26839 break 26840 } 26841 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 26842 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 26843 break 26844 } 26845 if v_1_0_0_0_0_0.AuxInt != 31 { 26846 break 26847 } 26848 if y != v_1_0_0_0_0_0.Args[0] { 26849 break 26850 } 26851 v_1_1 := v_1.Args[1] 26852 if v_1_1.Op != OpAMD64SHRL { 26853 break 26854 } 26855 _ = v_1_1.Args[1] 26856 if x != v_1_1.Args[0] { 26857 break 26858 } 26859 v_1_1_1 := v_1_1.Args[1] 26860 if v_1_1_1.Op != OpAMD64NEGQ { 26861 break 26862 } 26863 if y != v_1_1_1.Args[0] { 26864 break 26865 } 26866 v.reset(OpAMD64ROLL) 26867 v.AddArg(x) 26868 v.AddArg(y) 26869 return true 26870 } 26871 // match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y)) 26872 // cond: 26873 // result: (ROLL x y) 26874 for { 26875 _ = v.Args[1] 26876 v_0 := v.Args[0] 26877 if v_0.Op != OpAMD64ANDL { 26878 break 26879 } 26880 _ = v_0.Args[1] 26881 v_0_0 := v_0.Args[0] 26882 if v_0_0.Op != OpAMD64SHRL { 26883 break 26884 } 26885 _ = v_0_0.Args[1] 26886 x := v_0_0.Args[0] 26887 v_0_0_1 := v_0_0.Args[1] 26888 if v_0_0_1.Op != OpAMD64NEGQ { 26889 break 26890 } 26891 y := v_0_0_1.Args[0] 26892 v_0_1 := v_0.Args[1] 26893 if v_0_1.Op != OpAMD64SBBLcarrymask { 26894 break 26895 } 26896 v_0_1_0 := v_0_1.Args[0] 26897 if v_0_1_0.Op != OpAMD64CMPQconst { 26898 break 26899 } 26900 if v_0_1_0.AuxInt != 32 { 26901 break 26902 } 26903 v_0_1_0_0 := v_0_1_0.Args[0] 26904 if v_0_1_0_0.Op != OpAMD64NEGQ { 26905 break 26906 } 26907 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 26908 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 26909 break 26910 } 26911 if v_0_1_0_0_0.AuxInt != -32 { 26912 break 26913 } 26914 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 26915 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 26916 break 26917 } 26918 if v_0_1_0_0_0_0.AuxInt != 31 { 26919 break 26920 } 26921 if y != v_0_1_0_0_0_0.Args[0] { 26922 break 26923 } 26924 v_1 := v.Args[1] 26925 if v_1.Op != OpAMD64SHLL { 26926 break 26927 } 26928 _ = v_1.Args[1] 26929 if x != v_1.Args[0] { 26930 break 26931 } 26932 if y != v_1.Args[1] { 26933 break 26934 } 26935 v.reset(OpAMD64ROLL) 26936 v.AddArg(x) 26937 v.AddArg(y) 26938 return true 26939 } 26940 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y)) 26941 // cond: 26942 // result: (ROLL x y) 26943 for { 26944 _ = v.Args[1] 26945 v_0 := v.Args[0] 26946 if v_0.Op != OpAMD64ANDL { 26947 break 26948 } 26949 _ = v_0.Args[1] 26950 v_0_0 := v_0.Args[0] 26951 if v_0_0.Op != OpAMD64SBBLcarrymask { 26952 break 26953 } 26954 v_0_0_0 := v_0_0.Args[0] 26955 if v_0_0_0.Op != OpAMD64CMPQconst { 26956 break 26957 } 26958 if v_0_0_0.AuxInt != 32 { 26959 break 26960 } 26961 v_0_0_0_0 := v_0_0_0.Args[0] 26962 if v_0_0_0_0.Op != OpAMD64NEGQ { 26963 break 26964 } 26965 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 26966 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 
26967 break 26968 } 26969 if v_0_0_0_0_0.AuxInt != -32 { 26970 break 26971 } 26972 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 26973 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 26974 break 26975 } 26976 if v_0_0_0_0_0_0.AuxInt != 31 { 26977 break 26978 } 26979 y := v_0_0_0_0_0_0.Args[0] 26980 v_0_1 := v_0.Args[1] 26981 if v_0_1.Op != OpAMD64SHRL { 26982 break 26983 } 26984 _ = v_0_1.Args[1] 26985 x := v_0_1.Args[0] 26986 v_0_1_1 := v_0_1.Args[1] 26987 if v_0_1_1.Op != OpAMD64NEGQ { 26988 break 26989 } 26990 if y != v_0_1_1.Args[0] { 26991 break 26992 } 26993 v_1 := v.Args[1] 26994 if v_1.Op != OpAMD64SHLL { 26995 break 26996 } 26997 _ = v_1.Args[1] 26998 if x != v_1.Args[0] { 26999 break 27000 } 27001 if y != v_1.Args[1] { 27002 break 27003 } 27004 v.reset(OpAMD64ROLL) 27005 v.AddArg(x) 27006 v.AddArg(y) 27007 return true 27008 } 27009 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 27010 // cond: 27011 // result: (ROLL x y) 27012 for { 27013 _ = v.Args[1] 27014 v_0 := v.Args[0] 27015 if v_0.Op != OpAMD64SHLL { 27016 break 27017 } 27018 _ = v_0.Args[1] 27019 x := v_0.Args[0] 27020 y := v_0.Args[1] 27021 v_1 := v.Args[1] 27022 if v_1.Op != OpAMD64ANDL { 27023 break 27024 } 27025 _ = v_1.Args[1] 27026 v_1_0 := v_1.Args[0] 27027 if v_1_0.Op != OpAMD64SHRL { 27028 break 27029 } 27030 _ = v_1_0.Args[1] 27031 if x != v_1_0.Args[0] { 27032 break 27033 } 27034 v_1_0_1 := v_1_0.Args[1] 27035 if v_1_0_1.Op != OpAMD64NEGL { 27036 break 27037 } 27038 if y != v_1_0_1.Args[0] { 27039 break 27040 } 27041 v_1_1 := v_1.Args[1] 27042 if v_1_1.Op != OpAMD64SBBLcarrymask { 27043 break 27044 } 27045 v_1_1_0 := v_1_1.Args[0] 27046 if v_1_1_0.Op != OpAMD64CMPLconst { 27047 break 27048 } 27049 if v_1_1_0.AuxInt != 32 { 27050 break 27051 } 27052 v_1_1_0_0 := v_1_1_0.Args[0] 27053 if v_1_1_0_0.Op != OpAMD64NEGL { 27054 break 27055 } 27056 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 27057 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 27058 break 27059 } 27060 if v_1_1_0_0_0.AuxInt != -32 { 27061 break 27062 } 27063 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 27064 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 27065 break 27066 } 27067 if v_1_1_0_0_0_0.AuxInt != 31 { 27068 break 27069 } 27070 if y != v_1_1_0_0_0_0.Args[0] { 27071 break 27072 } 27073 v.reset(OpAMD64ROLL) 27074 v.AddArg(x) 27075 v.AddArg(y) 27076 return true 27077 } 27078 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y)))) 27079 // cond: 27080 // result: (ROLL x y) 27081 for { 27082 _ = v.Args[1] 27083 v_0 := v.Args[0] 27084 if v_0.Op != OpAMD64SHLL { 27085 break 27086 } 27087 _ = v_0.Args[1] 27088 x := v_0.Args[0] 27089 y := v_0.Args[1] 27090 v_1 := v.Args[1] 27091 if v_1.Op != OpAMD64ANDL { 27092 break 27093 } 27094 _ = v_1.Args[1] 27095 v_1_0 := v_1.Args[0] 27096 if v_1_0.Op != OpAMD64SBBLcarrymask { 27097 break 27098 } 27099 v_1_0_0 := v_1_0.Args[0] 27100 if v_1_0_0.Op != OpAMD64CMPLconst { 27101 break 27102 } 27103 if v_1_0_0.AuxInt != 32 { 27104 break 27105 } 27106 v_1_0_0_0 := v_1_0_0.Args[0] 27107 if v_1_0_0_0.Op != OpAMD64NEGL { 27108 break 27109 } 27110 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 27111 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 27112 break 27113 } 27114 if v_1_0_0_0_0.AuxInt != -32 { 27115 break 27116 } 27117 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 27118 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 27119 break 27120 } 27121 if v_1_0_0_0_0_0.AuxInt != 31 { 27122 break 27123 } 27124 if y != v_1_0_0_0_0_0.Args[0] { 27125 break 
27126 } 27127 v_1_1 := v_1.Args[1] 27128 if v_1_1.Op != OpAMD64SHRL { 27129 break 27130 } 27131 _ = v_1_1.Args[1] 27132 if x != v_1_1.Args[0] { 27133 break 27134 } 27135 v_1_1_1 := v_1_1.Args[1] 27136 if v_1_1_1.Op != OpAMD64NEGL { 27137 break 27138 } 27139 if y != v_1_1_1.Args[0] { 27140 break 27141 } 27142 v.reset(OpAMD64ROLL) 27143 v.AddArg(x) 27144 v.AddArg(y) 27145 return true 27146 } 27147 // match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y)) 27148 // cond: 27149 // result: (ROLL x y) 27150 for { 27151 _ = v.Args[1] 27152 v_0 := v.Args[0] 27153 if v_0.Op != OpAMD64ANDL { 27154 break 27155 } 27156 _ = v_0.Args[1] 27157 v_0_0 := v_0.Args[0] 27158 if v_0_0.Op != OpAMD64SHRL { 27159 break 27160 } 27161 _ = v_0_0.Args[1] 27162 x := v_0_0.Args[0] 27163 v_0_0_1 := v_0_0.Args[1] 27164 if v_0_0_1.Op != OpAMD64NEGL { 27165 break 27166 } 27167 y := v_0_0_1.Args[0] 27168 v_0_1 := v_0.Args[1] 27169 if v_0_1.Op != OpAMD64SBBLcarrymask { 27170 break 27171 } 27172 v_0_1_0 := v_0_1.Args[0] 27173 if v_0_1_0.Op != OpAMD64CMPLconst { 27174 break 27175 } 27176 if v_0_1_0.AuxInt != 32 { 27177 break 27178 } 27179 v_0_1_0_0 := v_0_1_0.Args[0] 27180 if v_0_1_0_0.Op != OpAMD64NEGL { 27181 break 27182 } 27183 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 27184 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 27185 break 27186 } 27187 if v_0_1_0_0_0.AuxInt != -32 { 27188 break 27189 } 27190 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 27191 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 27192 break 27193 } 27194 if v_0_1_0_0_0_0.AuxInt != 31 { 27195 break 27196 } 27197 if y != v_0_1_0_0_0_0.Args[0] { 27198 break 27199 } 27200 v_1 := v.Args[1] 27201 if v_1.Op != OpAMD64SHLL { 27202 break 27203 } 27204 _ = v_1.Args[1] 27205 if x != v_1.Args[0] { 27206 break 27207 } 27208 if y != v_1.Args[1] { 27209 break 27210 } 27211 v.reset(OpAMD64ROLL) 27212 v.AddArg(x) 27213 v.AddArg(y) 27214 return true 27215 } 27216 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y)) 27217 // cond: 27218 // result: (ROLL x y) 27219 for { 27220 _ = v.Args[1] 27221 v_0 := v.Args[0] 27222 if v_0.Op != OpAMD64ANDL { 27223 break 27224 } 27225 _ = v_0.Args[1] 27226 v_0_0 := v_0.Args[0] 27227 if v_0_0.Op != OpAMD64SBBLcarrymask { 27228 break 27229 } 27230 v_0_0_0 := v_0_0.Args[0] 27231 if v_0_0_0.Op != OpAMD64CMPLconst { 27232 break 27233 } 27234 if v_0_0_0.AuxInt != 32 { 27235 break 27236 } 27237 v_0_0_0_0 := v_0_0_0.Args[0] 27238 if v_0_0_0_0.Op != OpAMD64NEGL { 27239 break 27240 } 27241 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 27242 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 27243 break 27244 } 27245 if v_0_0_0_0_0.AuxInt != -32 { 27246 break 27247 } 27248 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 27249 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 27250 break 27251 } 27252 if v_0_0_0_0_0_0.AuxInt != 31 { 27253 break 27254 } 27255 y := v_0_0_0_0_0_0.Args[0] 27256 v_0_1 := v_0.Args[1] 27257 if v_0_1.Op != OpAMD64SHRL { 27258 break 27259 } 27260 _ = v_0_1.Args[1] 27261 x := v_0_1.Args[0] 27262 v_0_1_1 := v_0_1.Args[1] 27263 if v_0_1_1.Op != OpAMD64NEGL { 27264 break 27265 } 27266 if y != v_0_1_1.Args[0] { 27267 break 27268 } 27269 v_1 := v.Args[1] 27270 if v_1.Op != OpAMD64SHLL { 27271 break 27272 } 27273 _ = v_1.Args[1] 27274 if x != v_1.Args[0] { 27275 break 27276 } 27277 if y != v_1.Args[1] { 27278 break 27279 } 27280 v.reset(OpAMD64ROLL) 27281 v.AddArg(x) 27282 v.AddArg(y) 27283 return true 27284 } 27285 return false 27286 } 27287 func 
rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { 27288 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 27289 // cond: 27290 // result: (RORL x y) 27291 for { 27292 _ = v.Args[1] 27293 v_0 := v.Args[0] 27294 if v_0.Op != OpAMD64SHRL { 27295 break 27296 } 27297 _ = v_0.Args[1] 27298 x := v_0.Args[0] 27299 y := v_0.Args[1] 27300 v_1 := v.Args[1] 27301 if v_1.Op != OpAMD64ANDL { 27302 break 27303 } 27304 _ = v_1.Args[1] 27305 v_1_0 := v_1.Args[0] 27306 if v_1_0.Op != OpAMD64SHLL { 27307 break 27308 } 27309 _ = v_1_0.Args[1] 27310 if x != v_1_0.Args[0] { 27311 break 27312 } 27313 v_1_0_1 := v_1_0.Args[1] 27314 if v_1_0_1.Op != OpAMD64NEGQ { 27315 break 27316 } 27317 if y != v_1_0_1.Args[0] { 27318 break 27319 } 27320 v_1_1 := v_1.Args[1] 27321 if v_1_1.Op != OpAMD64SBBLcarrymask { 27322 break 27323 } 27324 v_1_1_0 := v_1_1.Args[0] 27325 if v_1_1_0.Op != OpAMD64CMPQconst { 27326 break 27327 } 27328 if v_1_1_0.AuxInt != 32 { 27329 break 27330 } 27331 v_1_1_0_0 := v_1_1_0.Args[0] 27332 if v_1_1_0_0.Op != OpAMD64NEGQ { 27333 break 27334 } 27335 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 27336 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 27337 break 27338 } 27339 if v_1_1_0_0_0.AuxInt != -32 { 27340 break 27341 } 27342 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 27343 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 27344 break 27345 } 27346 if v_1_1_0_0_0_0.AuxInt != 31 { 27347 break 27348 } 27349 if y != v_1_1_0_0_0_0.Args[0] { 27350 break 27351 } 27352 v.reset(OpAMD64RORL) 27353 v.AddArg(x) 27354 v.AddArg(y) 27355 return true 27356 } 27357 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y)))) 27358 // cond: 27359 // result: (RORL x y) 27360 for { 27361 _ = v.Args[1] 27362 v_0 := v.Args[0] 27363 if v_0.Op != OpAMD64SHRL { 27364 break 27365 } 27366 _ = v_0.Args[1] 27367 x := v_0.Args[0] 27368 y := v_0.Args[1] 27369 v_1 := v.Args[1] 27370 if v_1.Op != OpAMD64ANDL { 27371 break 27372 } 27373 _ = v_1.Args[1] 27374 v_1_0 := v_1.Args[0] 27375 if v_1_0.Op != OpAMD64SBBLcarrymask { 27376 break 27377 } 27378 v_1_0_0 := v_1_0.Args[0] 27379 if v_1_0_0.Op != OpAMD64CMPQconst { 27380 break 27381 } 27382 if v_1_0_0.AuxInt != 32 { 27383 break 27384 } 27385 v_1_0_0_0 := v_1_0_0.Args[0] 27386 if v_1_0_0_0.Op != OpAMD64NEGQ { 27387 break 27388 } 27389 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 27390 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 27391 break 27392 } 27393 if v_1_0_0_0_0.AuxInt != -32 { 27394 break 27395 } 27396 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 27397 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 27398 break 27399 } 27400 if v_1_0_0_0_0_0.AuxInt != 31 { 27401 break 27402 } 27403 if y != v_1_0_0_0_0_0.Args[0] { 27404 break 27405 } 27406 v_1_1 := v_1.Args[1] 27407 if v_1_1.Op != OpAMD64SHLL { 27408 break 27409 } 27410 _ = v_1_1.Args[1] 27411 if x != v_1_1.Args[0] { 27412 break 27413 } 27414 v_1_1_1 := v_1_1.Args[1] 27415 if v_1_1_1.Op != OpAMD64NEGQ { 27416 break 27417 } 27418 if y != v_1_1_1.Args[0] { 27419 break 27420 } 27421 v.reset(OpAMD64RORL) 27422 v.AddArg(x) 27423 v.AddArg(y) 27424 return true 27425 } 27426 // match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y)) 27427 // cond: 27428 // result: (RORL x y) 27429 for { 27430 _ = v.Args[1] 27431 v_0 := v.Args[0] 27432 if v_0.Op != OpAMD64ANDL { 27433 break 27434 } 27435 _ = v_0.Args[1] 27436 v_0_0 := v_0.Args[0] 27437 if v_0_0.Op != OpAMD64SHLL { 27438 break 27439 } 
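// The sprawling patterns in this function undo the generic expansion of a
// variable rotate: one shift by y, the opposite shift by the negated
// count, and an (SBBLcarrymask (CMPQconst ...)) term that evaluates to all
// ones or all zeros so the masked shift drops out when y&31 == 0. Matching
// the entire tree collapses it back into a single RORL (or ROLL in the
// shift-left-first forms above).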
27440 _ = v_0_0.Args[1] 27441 x := v_0_0.Args[0] 27442 v_0_0_1 := v_0_0.Args[1] 27443 if v_0_0_1.Op != OpAMD64NEGQ { 27444 break 27445 } 27446 y := v_0_0_1.Args[0] 27447 v_0_1 := v_0.Args[1] 27448 if v_0_1.Op != OpAMD64SBBLcarrymask { 27449 break 27450 } 27451 v_0_1_0 := v_0_1.Args[0] 27452 if v_0_1_0.Op != OpAMD64CMPQconst { 27453 break 27454 } 27455 if v_0_1_0.AuxInt != 32 { 27456 break 27457 } 27458 v_0_1_0_0 := v_0_1_0.Args[0] 27459 if v_0_1_0_0.Op != OpAMD64NEGQ { 27460 break 27461 } 27462 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 27463 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 27464 break 27465 } 27466 if v_0_1_0_0_0.AuxInt != -32 { 27467 break 27468 } 27469 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 27470 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 27471 break 27472 } 27473 if v_0_1_0_0_0_0.AuxInt != 31 { 27474 break 27475 } 27476 if y != v_0_1_0_0_0_0.Args[0] { 27477 break 27478 } 27479 v_1 := v.Args[1] 27480 if v_1.Op != OpAMD64SHRL { 27481 break 27482 } 27483 _ = v_1.Args[1] 27484 if x != v_1.Args[0] { 27485 break 27486 } 27487 if y != v_1.Args[1] { 27488 break 27489 } 27490 v.reset(OpAMD64RORL) 27491 v.AddArg(x) 27492 v.AddArg(y) 27493 return true 27494 } 27495 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y)) 27496 // cond: 27497 // result: (RORL x y) 27498 for { 27499 _ = v.Args[1] 27500 v_0 := v.Args[0] 27501 if v_0.Op != OpAMD64ANDL { 27502 break 27503 } 27504 _ = v_0.Args[1] 27505 v_0_0 := v_0.Args[0] 27506 if v_0_0.Op != OpAMD64SBBLcarrymask { 27507 break 27508 } 27509 v_0_0_0 := v_0_0.Args[0] 27510 if v_0_0_0.Op != OpAMD64CMPQconst { 27511 break 27512 } 27513 if v_0_0_0.AuxInt != 32 { 27514 break 27515 } 27516 v_0_0_0_0 := v_0_0_0.Args[0] 27517 if v_0_0_0_0.Op != OpAMD64NEGQ { 27518 break 27519 } 27520 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 27521 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 27522 break 27523 } 27524 if v_0_0_0_0_0.AuxInt != -32 { 27525 break 27526 } 27527 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 27528 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 27529 break 27530 } 27531 if v_0_0_0_0_0_0.AuxInt != 31 { 27532 break 27533 } 27534 y := v_0_0_0_0_0_0.Args[0] 27535 v_0_1 := v_0.Args[1] 27536 if v_0_1.Op != OpAMD64SHLL { 27537 break 27538 } 27539 _ = v_0_1.Args[1] 27540 x := v_0_1.Args[0] 27541 v_0_1_1 := v_0_1.Args[1] 27542 if v_0_1_1.Op != OpAMD64NEGQ { 27543 break 27544 } 27545 if y != v_0_1_1.Args[0] { 27546 break 27547 } 27548 v_1 := v.Args[1] 27549 if v_1.Op != OpAMD64SHRL { 27550 break 27551 } 27552 _ = v_1.Args[1] 27553 if x != v_1.Args[0] { 27554 break 27555 } 27556 if y != v_1.Args[1] { 27557 break 27558 } 27559 v.reset(OpAMD64RORL) 27560 v.AddArg(x) 27561 v.AddArg(y) 27562 return true 27563 } 27564 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 27565 // cond: 27566 // result: (RORL x y) 27567 for { 27568 _ = v.Args[1] 27569 v_0 := v.Args[0] 27570 if v_0.Op != OpAMD64SHRL { 27571 break 27572 } 27573 _ = v_0.Args[1] 27574 x := v_0.Args[0] 27575 y := v_0.Args[1] 27576 v_1 := v.Args[1] 27577 if v_1.Op != OpAMD64ANDL { 27578 break 27579 } 27580 _ = v_1.Args[1] 27581 v_1_0 := v_1.Args[0] 27582 if v_1_0.Op != OpAMD64SHLL { 27583 break 27584 } 27585 _ = v_1_0.Args[1] 27586 if x != v_1_0.Args[0] { 27587 break 27588 } 27589 v_1_0_1 := v_1_0.Args[1] 27590 if v_1_0_1.Op != OpAMD64NEGL { 27591 break 27592 } 27593 if y != v_1_0_1.Args[0] { 27594 break 27595 } 27596 v_1_1 := v_1.Args[1] 27597 if v_1_1.Op != OpAMD64SBBLcarrymask { 27598 break 
27599 } 27600 v_1_1_0 := v_1_1.Args[0] 27601 if v_1_1_0.Op != OpAMD64CMPLconst { 27602 break 27603 } 27604 if v_1_1_0.AuxInt != 32 { 27605 break 27606 } 27607 v_1_1_0_0 := v_1_1_0.Args[0] 27608 if v_1_1_0_0.Op != OpAMD64NEGL { 27609 break 27610 } 27611 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 27612 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 27613 break 27614 } 27615 if v_1_1_0_0_0.AuxInt != -32 { 27616 break 27617 } 27618 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 27619 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 27620 break 27621 } 27622 if v_1_1_0_0_0_0.AuxInt != 31 { 27623 break 27624 } 27625 if y != v_1_1_0_0_0_0.Args[0] { 27626 break 27627 } 27628 v.reset(OpAMD64RORL) 27629 v.AddArg(x) 27630 v.AddArg(y) 27631 return true 27632 } 27633 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y)))) 27634 // cond: 27635 // result: (RORL x y) 27636 for { 27637 _ = v.Args[1] 27638 v_0 := v.Args[0] 27639 if v_0.Op != OpAMD64SHRL { 27640 break 27641 } 27642 _ = v_0.Args[1] 27643 x := v_0.Args[0] 27644 y := v_0.Args[1] 27645 v_1 := v.Args[1] 27646 if v_1.Op != OpAMD64ANDL { 27647 break 27648 } 27649 _ = v_1.Args[1] 27650 v_1_0 := v_1.Args[0] 27651 if v_1_0.Op != OpAMD64SBBLcarrymask { 27652 break 27653 } 27654 v_1_0_0 := v_1_0.Args[0] 27655 if v_1_0_0.Op != OpAMD64CMPLconst { 27656 break 27657 } 27658 if v_1_0_0.AuxInt != 32 { 27659 break 27660 } 27661 v_1_0_0_0 := v_1_0_0.Args[0] 27662 if v_1_0_0_0.Op != OpAMD64NEGL { 27663 break 27664 } 27665 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 27666 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 27667 break 27668 } 27669 if v_1_0_0_0_0.AuxInt != -32 { 27670 break 27671 } 27672 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 27673 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 27674 break 27675 } 27676 if v_1_0_0_0_0_0.AuxInt != 31 { 27677 break 27678 } 27679 if y != v_1_0_0_0_0_0.Args[0] { 27680 break 27681 } 27682 v_1_1 := v_1.Args[1] 27683 if v_1_1.Op != OpAMD64SHLL { 27684 break 27685 } 27686 _ = v_1_1.Args[1] 27687 if x != v_1_1.Args[0] { 27688 break 27689 } 27690 v_1_1_1 := v_1_1.Args[1] 27691 if v_1_1_1.Op != OpAMD64NEGL { 27692 break 27693 } 27694 if y != v_1_1_1.Args[0] { 27695 break 27696 } 27697 v.reset(OpAMD64RORL) 27698 v.AddArg(x) 27699 v.AddArg(y) 27700 return true 27701 } 27702 // match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y)) 27703 // cond: 27704 // result: (RORL x y) 27705 for { 27706 _ = v.Args[1] 27707 v_0 := v.Args[0] 27708 if v_0.Op != OpAMD64ANDL { 27709 break 27710 } 27711 _ = v_0.Args[1] 27712 v_0_0 := v_0.Args[0] 27713 if v_0_0.Op != OpAMD64SHLL { 27714 break 27715 } 27716 _ = v_0_0.Args[1] 27717 x := v_0_0.Args[0] 27718 v_0_0_1 := v_0_0.Args[1] 27719 if v_0_0_1.Op != OpAMD64NEGL { 27720 break 27721 } 27722 y := v_0_0_1.Args[0] 27723 v_0_1 := v_0.Args[1] 27724 if v_0_1.Op != OpAMD64SBBLcarrymask { 27725 break 27726 } 27727 v_0_1_0 := v_0_1.Args[0] 27728 if v_0_1_0.Op != OpAMD64CMPLconst { 27729 break 27730 } 27731 if v_0_1_0.AuxInt != 32 { 27732 break 27733 } 27734 v_0_1_0_0 := v_0_1_0.Args[0] 27735 if v_0_1_0_0.Op != OpAMD64NEGL { 27736 break 27737 } 27738 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 27739 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 27740 break 27741 } 27742 if v_0_1_0_0_0.AuxInt != -32 { 27743 break 27744 } 27745 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 27746 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 27747 break 27748 } 27749 if v_0_1_0_0_0_0.AuxInt != 31 { 27750 break 27751 } 27752 if y != v_0_1_0_0_0_0.Args[0] { 27753 break 27754 } 
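// ORL and ANDL are commutative, so the rule generator emits each rotate
// pattern once per operand order; together with the 64-bit (NEGQ/CMPQconst)
// versus 32-bit (NEGL/CMPLconst) count forms, that is why the same
// ROLL/RORL match recurs in eight nearly identical variants.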
27755 v_1 := v.Args[1] 27756 if v_1.Op != OpAMD64SHRL { 27757 break 27758 } 27759 _ = v_1.Args[1] 27760 if x != v_1.Args[0] { 27761 break 27762 } 27763 if y != v_1.Args[1] { 27764 break 27765 } 27766 v.reset(OpAMD64RORL) 27767 v.AddArg(x) 27768 v.AddArg(y) 27769 return true 27770 } 27771 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y)) 27772 // cond: 27773 // result: (RORL x y) 27774 for { 27775 _ = v.Args[1] 27776 v_0 := v.Args[0] 27777 if v_0.Op != OpAMD64ANDL { 27778 break 27779 } 27780 _ = v_0.Args[1] 27781 v_0_0 := v_0.Args[0] 27782 if v_0_0.Op != OpAMD64SBBLcarrymask { 27783 break 27784 } 27785 v_0_0_0 := v_0_0.Args[0] 27786 if v_0_0_0.Op != OpAMD64CMPLconst { 27787 break 27788 } 27789 if v_0_0_0.AuxInt != 32 { 27790 break 27791 } 27792 v_0_0_0_0 := v_0_0_0.Args[0] 27793 if v_0_0_0_0.Op != OpAMD64NEGL { 27794 break 27795 } 27796 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 27797 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 27798 break 27799 } 27800 if v_0_0_0_0_0.AuxInt != -32 { 27801 break 27802 } 27803 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 27804 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 27805 break 27806 } 27807 if v_0_0_0_0_0_0.AuxInt != 31 { 27808 break 27809 } 27810 y := v_0_0_0_0_0_0.Args[0] 27811 v_0_1 := v_0.Args[1] 27812 if v_0_1.Op != OpAMD64SHLL { 27813 break 27814 } 27815 _ = v_0_1.Args[1] 27816 x := v_0_1.Args[0] 27817 v_0_1_1 := v_0_1.Args[1] 27818 if v_0_1_1.Op != OpAMD64NEGL { 27819 break 27820 } 27821 if y != v_0_1_1.Args[0] { 27822 break 27823 } 27824 v_1 := v.Args[1] 27825 if v_1.Op != OpAMD64SHRL { 27826 break 27827 } 27828 _ = v_1.Args[1] 27829 if x != v_1.Args[0] { 27830 break 27831 } 27832 if y != v_1.Args[1] { 27833 break 27834 } 27835 v.reset(OpAMD64RORL) 27836 v.AddArg(x) 27837 v.AddArg(y) 27838 return true 27839 } 27840 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) 27841 // cond: v.Type.Size() == 2 27842 // result: (ROLW x y) 27843 for { 27844 _ = v.Args[1] 27845 v_0 := v.Args[0] 27846 if v_0.Op != OpAMD64SHLL { 27847 break 27848 } 27849 _ = v_0.Args[1] 27850 x := v_0.Args[0] 27851 v_0_1 := v_0.Args[1] 27852 if v_0_1.Op != OpAMD64ANDQconst { 27853 break 27854 } 27855 if v_0_1.AuxInt != 15 { 27856 break 27857 } 27858 y := v_0_1.Args[0] 27859 v_1 := v.Args[1] 27860 if v_1.Op != OpAMD64ANDL { 27861 break 27862 } 27863 _ = v_1.Args[1] 27864 v_1_0 := v_1.Args[0] 27865 if v_1_0.Op != OpAMD64SHRW { 27866 break 27867 } 27868 _ = v_1_0.Args[1] 27869 if x != v_1_0.Args[0] { 27870 break 27871 } 27872 v_1_0_1 := v_1_0.Args[1] 27873 if v_1_0_1.Op != OpAMD64NEGQ { 27874 break 27875 } 27876 v_1_0_1_0 := v_1_0_1.Args[0] 27877 if v_1_0_1_0.Op != OpAMD64ADDQconst { 27878 break 27879 } 27880 if v_1_0_1_0.AuxInt != -16 { 27881 break 27882 } 27883 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 27884 if v_1_0_1_0_0.Op != OpAMD64ANDQconst { 27885 break 27886 } 27887 if v_1_0_1_0_0.AuxInt != 15 { 27888 break 27889 } 27890 if y != v_1_0_1_0_0.Args[0] { 27891 break 27892 } 27893 v_1_1 := v_1.Args[1] 27894 if v_1_1.Op != OpAMD64SBBLcarrymask { 27895 break 27896 } 27897 v_1_1_0 := v_1_1.Args[0] 27898 if v_1_1_0.Op != OpAMD64CMPQconst { 27899 break 27900 } 27901 if v_1_1_0.AuxInt != 16 { 27902 break 27903 } 27904 v_1_1_0_0 := v_1_1_0.Args[0] 27905 if v_1_1_0_0.Op != OpAMD64NEGQ { 27906 break 27907 } 27908 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 27909 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 27910 break 27911 } 
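// Note (editorial): from here the same rotate recognition is repeated for
// 16-bit values. Because amd64 shift instructions mask the count by 31 rather
// than 15, the source idiom needs an explicit &15, which is why
// ANDQconst/ANDLconst [15] appears in every match; the v.Type.Size() == 2
// condition ensures only the low 16 bits of the ORL result are observed before
// rewriting to ROLW (the mirrored RORW rules follow later in this function).
// A sketch of the assumed source idiom:
//
//	// rol16 rotates a uint16 left by y bits.
//	func rol16(x uint16, y uint) uint16 {
//		return x<<(y&15) | x>>(16-y&15)
//	}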
27912 if v_1_1_0_0_0.AuxInt != -16 { 27913 break 27914 } 27915 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 27916 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 27917 break 27918 } 27919 if v_1_1_0_0_0_0.AuxInt != 15 { 27920 break 27921 } 27922 if y != v_1_1_0_0_0_0.Args[0] { 27923 break 27924 } 27925 if !(v.Type.Size() == 2) { 27926 break 27927 } 27928 v.reset(OpAMD64ROLW) 27929 v.AddArg(x) 27930 v.AddArg(y) 27931 return true 27932 } 27933 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))) 27934 // cond: v.Type.Size() == 2 27935 // result: (ROLW x y) 27936 for { 27937 _ = v.Args[1] 27938 v_0 := v.Args[0] 27939 if v_0.Op != OpAMD64SHLL { 27940 break 27941 } 27942 _ = v_0.Args[1] 27943 x := v_0.Args[0] 27944 v_0_1 := v_0.Args[1] 27945 if v_0_1.Op != OpAMD64ANDQconst { 27946 break 27947 } 27948 if v_0_1.AuxInt != 15 { 27949 break 27950 } 27951 y := v_0_1.Args[0] 27952 v_1 := v.Args[1] 27953 if v_1.Op != OpAMD64ANDL { 27954 break 27955 } 27956 _ = v_1.Args[1] 27957 v_1_0 := v_1.Args[0] 27958 if v_1_0.Op != OpAMD64SBBLcarrymask { 27959 break 27960 } 27961 v_1_0_0 := v_1_0.Args[0] 27962 if v_1_0_0.Op != OpAMD64CMPQconst { 27963 break 27964 } 27965 if v_1_0_0.AuxInt != 16 { 27966 break 27967 } 27968 v_1_0_0_0 := v_1_0_0.Args[0] 27969 if v_1_0_0_0.Op != OpAMD64NEGQ { 27970 break 27971 } 27972 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 27973 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 27974 break 27975 } 27976 if v_1_0_0_0_0.AuxInt != -16 { 27977 break 27978 } 27979 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 27980 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 27981 break 27982 } 27983 if v_1_0_0_0_0_0.AuxInt != 15 { 27984 break 27985 } 27986 if y != v_1_0_0_0_0_0.Args[0] { 27987 break 27988 } 27989 v_1_1 := v_1.Args[1] 27990 if v_1_1.Op != OpAMD64SHRW { 27991 break 27992 } 27993 _ = v_1_1.Args[1] 27994 if x != v_1_1.Args[0] { 27995 break 27996 } 27997 v_1_1_1 := v_1_1.Args[1] 27998 if v_1_1_1.Op != OpAMD64NEGQ { 27999 break 28000 } 28001 v_1_1_1_0 := v_1_1_1.Args[0] 28002 if v_1_1_1_0.Op != OpAMD64ADDQconst { 28003 break 28004 } 28005 if v_1_1_1_0.AuxInt != -16 { 28006 break 28007 } 28008 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 28009 if v_1_1_1_0_0.Op != OpAMD64ANDQconst { 28010 break 28011 } 28012 if v_1_1_1_0_0.AuxInt != 15 { 28013 break 28014 } 28015 if y != v_1_1_1_0_0.Args[0] { 28016 break 28017 } 28018 if !(v.Type.Size() == 2) { 28019 break 28020 } 28021 v.reset(OpAMD64ROLW) 28022 v.AddArg(x) 28023 v.AddArg(y) 28024 return true 28025 } 28026 return false 28027 } 28028 func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { 28029 // match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15]))) 28030 // cond: v.Type.Size() == 2 28031 // result: (ROLW x y) 28032 for { 28033 _ = v.Args[1] 28034 v_0 := v.Args[0] 28035 if v_0.Op != OpAMD64ANDL { 28036 break 28037 } 28038 _ = v_0.Args[1] 28039 v_0_0 := v_0.Args[0] 28040 if v_0_0.Op != OpAMD64SHRW { 28041 break 28042 } 28043 _ = v_0_0.Args[1] 28044 x := v_0_0.Args[0] 28045 v_0_0_1 := v_0_0.Args[1] 28046 if v_0_0_1.Op != OpAMD64NEGQ { 28047 break 28048 } 28049 v_0_0_1_0 := v_0_0_1.Args[0] 28050 if v_0_0_1_0.Op != OpAMD64ADDQconst { 28051 break 28052 } 28053 if v_0_0_1_0.AuxInt != -16 { 28054 break 28055 } 28056 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 28057 if v_0_0_1_0_0.Op != OpAMD64ANDQconst { 28058 break 28059 } 28060 if v_0_0_1_0_0.AuxInt != 15 { 28061 
break 28062 } 28063 y := v_0_0_1_0_0.Args[0] 28064 v_0_1 := v_0.Args[1] 28065 if v_0_1.Op != OpAMD64SBBLcarrymask { 28066 break 28067 } 28068 v_0_1_0 := v_0_1.Args[0] 28069 if v_0_1_0.Op != OpAMD64CMPQconst { 28070 break 28071 } 28072 if v_0_1_0.AuxInt != 16 { 28073 break 28074 } 28075 v_0_1_0_0 := v_0_1_0.Args[0] 28076 if v_0_1_0_0.Op != OpAMD64NEGQ { 28077 break 28078 } 28079 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 28080 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 28081 break 28082 } 28083 if v_0_1_0_0_0.AuxInt != -16 { 28084 break 28085 } 28086 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 28087 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 28088 break 28089 } 28090 if v_0_1_0_0_0_0.AuxInt != 15 { 28091 break 28092 } 28093 if y != v_0_1_0_0_0_0.Args[0] { 28094 break 28095 } 28096 v_1 := v.Args[1] 28097 if v_1.Op != OpAMD64SHLL { 28098 break 28099 } 28100 _ = v_1.Args[1] 28101 if x != v_1.Args[0] { 28102 break 28103 } 28104 v_1_1 := v_1.Args[1] 28105 if v_1_1.Op != OpAMD64ANDQconst { 28106 break 28107 } 28108 if v_1_1.AuxInt != 15 { 28109 break 28110 } 28111 if y != v_1_1.Args[0] { 28112 break 28113 } 28114 if !(v.Type.Size() == 2) { 28115 break 28116 } 28117 v.reset(OpAMD64ROLW) 28118 v.AddArg(x) 28119 v.AddArg(y) 28120 return true 28121 } 28122 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15]))) 28123 // cond: v.Type.Size() == 2 28124 // result: (ROLW x y) 28125 for { 28126 _ = v.Args[1] 28127 v_0 := v.Args[0] 28128 if v_0.Op != OpAMD64ANDL { 28129 break 28130 } 28131 _ = v_0.Args[1] 28132 v_0_0 := v_0.Args[0] 28133 if v_0_0.Op != OpAMD64SBBLcarrymask { 28134 break 28135 } 28136 v_0_0_0 := v_0_0.Args[0] 28137 if v_0_0_0.Op != OpAMD64CMPQconst { 28138 break 28139 } 28140 if v_0_0_0.AuxInt != 16 { 28141 break 28142 } 28143 v_0_0_0_0 := v_0_0_0.Args[0] 28144 if v_0_0_0_0.Op != OpAMD64NEGQ { 28145 break 28146 } 28147 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 28148 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 28149 break 28150 } 28151 if v_0_0_0_0_0.AuxInt != -16 { 28152 break 28153 } 28154 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 28155 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 28156 break 28157 } 28158 if v_0_0_0_0_0_0.AuxInt != 15 { 28159 break 28160 } 28161 y := v_0_0_0_0_0_0.Args[0] 28162 v_0_1 := v_0.Args[1] 28163 if v_0_1.Op != OpAMD64SHRW { 28164 break 28165 } 28166 _ = v_0_1.Args[1] 28167 x := v_0_1.Args[0] 28168 v_0_1_1 := v_0_1.Args[1] 28169 if v_0_1_1.Op != OpAMD64NEGQ { 28170 break 28171 } 28172 v_0_1_1_0 := v_0_1_1.Args[0] 28173 if v_0_1_1_0.Op != OpAMD64ADDQconst { 28174 break 28175 } 28176 if v_0_1_1_0.AuxInt != -16 { 28177 break 28178 } 28179 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 28180 if v_0_1_1_0_0.Op != OpAMD64ANDQconst { 28181 break 28182 } 28183 if v_0_1_1_0_0.AuxInt != 15 { 28184 break 28185 } 28186 if y != v_0_1_1_0_0.Args[0] { 28187 break 28188 } 28189 v_1 := v.Args[1] 28190 if v_1.Op != OpAMD64SHLL { 28191 break 28192 } 28193 _ = v_1.Args[1] 28194 if x != v_1.Args[0] { 28195 break 28196 } 28197 v_1_1 := v_1.Args[1] 28198 if v_1_1.Op != OpAMD64ANDQconst { 28199 break 28200 } 28201 if v_1_1.AuxInt != 15 { 28202 break 28203 } 28204 if y != v_1_1.Args[0] { 28205 break 28206 } 28207 if !(v.Type.Size() == 2) { 28208 break 28209 } 28210 v.reset(OpAMD64ROLW) 28211 v.AddArg(x) 28212 v.AddArg(y) 28213 return true 28214 } 28215 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) 
[16])))) 28216 // cond: v.Type.Size() == 2 28217 // result: (ROLW x y) 28218 for { 28219 _ = v.Args[1] 28220 v_0 := v.Args[0] 28221 if v_0.Op != OpAMD64SHLL { 28222 break 28223 } 28224 _ = v_0.Args[1] 28225 x := v_0.Args[0] 28226 v_0_1 := v_0.Args[1] 28227 if v_0_1.Op != OpAMD64ANDLconst { 28228 break 28229 } 28230 if v_0_1.AuxInt != 15 { 28231 break 28232 } 28233 y := v_0_1.Args[0] 28234 v_1 := v.Args[1] 28235 if v_1.Op != OpAMD64ANDL { 28236 break 28237 } 28238 _ = v_1.Args[1] 28239 v_1_0 := v_1.Args[0] 28240 if v_1_0.Op != OpAMD64SHRW { 28241 break 28242 } 28243 _ = v_1_0.Args[1] 28244 if x != v_1_0.Args[0] { 28245 break 28246 } 28247 v_1_0_1 := v_1_0.Args[1] 28248 if v_1_0_1.Op != OpAMD64NEGL { 28249 break 28250 } 28251 v_1_0_1_0 := v_1_0_1.Args[0] 28252 if v_1_0_1_0.Op != OpAMD64ADDLconst { 28253 break 28254 } 28255 if v_1_0_1_0.AuxInt != -16 { 28256 break 28257 } 28258 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 28259 if v_1_0_1_0_0.Op != OpAMD64ANDLconst { 28260 break 28261 } 28262 if v_1_0_1_0_0.AuxInt != 15 { 28263 break 28264 } 28265 if y != v_1_0_1_0_0.Args[0] { 28266 break 28267 } 28268 v_1_1 := v_1.Args[1] 28269 if v_1_1.Op != OpAMD64SBBLcarrymask { 28270 break 28271 } 28272 v_1_1_0 := v_1_1.Args[0] 28273 if v_1_1_0.Op != OpAMD64CMPLconst { 28274 break 28275 } 28276 if v_1_1_0.AuxInt != 16 { 28277 break 28278 } 28279 v_1_1_0_0 := v_1_1_0.Args[0] 28280 if v_1_1_0_0.Op != OpAMD64NEGL { 28281 break 28282 } 28283 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 28284 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 28285 break 28286 } 28287 if v_1_1_0_0_0.AuxInt != -16 { 28288 break 28289 } 28290 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 28291 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 28292 break 28293 } 28294 if v_1_1_0_0_0_0.AuxInt != 15 { 28295 break 28296 } 28297 if y != v_1_1_0_0_0_0.Args[0] { 28298 break 28299 } 28300 if !(v.Type.Size() == 2) { 28301 break 28302 } 28303 v.reset(OpAMD64ROLW) 28304 v.AddArg(x) 28305 v.AddArg(y) 28306 return true 28307 } 28308 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))) 28309 // cond: v.Type.Size() == 2 28310 // result: (ROLW x y) 28311 for { 28312 _ = v.Args[1] 28313 v_0 := v.Args[0] 28314 if v_0.Op != OpAMD64SHLL { 28315 break 28316 } 28317 _ = v_0.Args[1] 28318 x := v_0.Args[0] 28319 v_0_1 := v_0.Args[1] 28320 if v_0_1.Op != OpAMD64ANDLconst { 28321 break 28322 } 28323 if v_0_1.AuxInt != 15 { 28324 break 28325 } 28326 y := v_0_1.Args[0] 28327 v_1 := v.Args[1] 28328 if v_1.Op != OpAMD64ANDL { 28329 break 28330 } 28331 _ = v_1.Args[1] 28332 v_1_0 := v_1.Args[0] 28333 if v_1_0.Op != OpAMD64SBBLcarrymask { 28334 break 28335 } 28336 v_1_0_0 := v_1_0.Args[0] 28337 if v_1_0_0.Op != OpAMD64CMPLconst { 28338 break 28339 } 28340 if v_1_0_0.AuxInt != 16 { 28341 break 28342 } 28343 v_1_0_0_0 := v_1_0_0.Args[0] 28344 if v_1_0_0_0.Op != OpAMD64NEGL { 28345 break 28346 } 28347 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 28348 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 28349 break 28350 } 28351 if v_1_0_0_0_0.AuxInt != -16 { 28352 break 28353 } 28354 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 28355 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 28356 break 28357 } 28358 if v_1_0_0_0_0_0.AuxInt != 15 { 28359 break 28360 } 28361 if y != v_1_0_0_0_0_0.Args[0] { 28362 break 28363 } 28364 v_1_1 := v_1.Args[1] 28365 if v_1_1.Op != OpAMD64SHRW { 28366 break 28367 } 28368 _ = v_1_1.Args[1] 28369 if x != v_1_1.Args[0] { 28370 break 28371 } 28372 v_1_1_1 := v_1_1.Args[1] 28373 if v_1_1_1.Op != 
OpAMD64NEGL { 28374 break 28375 } 28376 v_1_1_1_0 := v_1_1_1.Args[0] 28377 if v_1_1_1_0.Op != OpAMD64ADDLconst { 28378 break 28379 } 28380 if v_1_1_1_0.AuxInt != -16 { 28381 break 28382 } 28383 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 28384 if v_1_1_1_0_0.Op != OpAMD64ANDLconst { 28385 break 28386 } 28387 if v_1_1_1_0_0.AuxInt != 15 { 28388 break 28389 } 28390 if y != v_1_1_1_0_0.Args[0] { 28391 break 28392 } 28393 if !(v.Type.Size() == 2) { 28394 break 28395 } 28396 v.reset(OpAMD64ROLW) 28397 v.AddArg(x) 28398 v.AddArg(y) 28399 return true 28400 } 28401 // match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15]))) 28402 // cond: v.Type.Size() == 2 28403 // result: (ROLW x y) 28404 for { 28405 _ = v.Args[1] 28406 v_0 := v.Args[0] 28407 if v_0.Op != OpAMD64ANDL { 28408 break 28409 } 28410 _ = v_0.Args[1] 28411 v_0_0 := v_0.Args[0] 28412 if v_0_0.Op != OpAMD64SHRW { 28413 break 28414 } 28415 _ = v_0_0.Args[1] 28416 x := v_0_0.Args[0] 28417 v_0_0_1 := v_0_0.Args[1] 28418 if v_0_0_1.Op != OpAMD64NEGL { 28419 break 28420 } 28421 v_0_0_1_0 := v_0_0_1.Args[0] 28422 if v_0_0_1_0.Op != OpAMD64ADDLconst { 28423 break 28424 } 28425 if v_0_0_1_0.AuxInt != -16 { 28426 break 28427 } 28428 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 28429 if v_0_0_1_0_0.Op != OpAMD64ANDLconst { 28430 break 28431 } 28432 if v_0_0_1_0_0.AuxInt != 15 { 28433 break 28434 } 28435 y := v_0_0_1_0_0.Args[0] 28436 v_0_1 := v_0.Args[1] 28437 if v_0_1.Op != OpAMD64SBBLcarrymask { 28438 break 28439 } 28440 v_0_1_0 := v_0_1.Args[0] 28441 if v_0_1_0.Op != OpAMD64CMPLconst { 28442 break 28443 } 28444 if v_0_1_0.AuxInt != 16 { 28445 break 28446 } 28447 v_0_1_0_0 := v_0_1_0.Args[0] 28448 if v_0_1_0_0.Op != OpAMD64NEGL { 28449 break 28450 } 28451 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 28452 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 28453 break 28454 } 28455 if v_0_1_0_0_0.AuxInt != -16 { 28456 break 28457 } 28458 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 28459 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 28460 break 28461 } 28462 if v_0_1_0_0_0_0.AuxInt != 15 { 28463 break 28464 } 28465 if y != v_0_1_0_0_0_0.Args[0] { 28466 break 28467 } 28468 v_1 := v.Args[1] 28469 if v_1.Op != OpAMD64SHLL { 28470 break 28471 } 28472 _ = v_1.Args[1] 28473 if x != v_1.Args[0] { 28474 break 28475 } 28476 v_1_1 := v_1.Args[1] 28477 if v_1_1.Op != OpAMD64ANDLconst { 28478 break 28479 } 28480 if v_1_1.AuxInt != 15 { 28481 break 28482 } 28483 if y != v_1_1.Args[0] { 28484 break 28485 } 28486 if !(v.Type.Size() == 2) { 28487 break 28488 } 28489 v.reset(OpAMD64ROLW) 28490 v.AddArg(x) 28491 v.AddArg(y) 28492 return true 28493 } 28494 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15]))) 28495 // cond: v.Type.Size() == 2 28496 // result: (ROLW x y) 28497 for { 28498 _ = v.Args[1] 28499 v_0 := v.Args[0] 28500 if v_0.Op != OpAMD64ANDL { 28501 break 28502 } 28503 _ = v_0.Args[1] 28504 v_0_0 := v_0.Args[0] 28505 if v_0_0.Op != OpAMD64SBBLcarrymask { 28506 break 28507 } 28508 v_0_0_0 := v_0_0.Args[0] 28509 if v_0_0_0.Op != OpAMD64CMPLconst { 28510 break 28511 } 28512 if v_0_0_0.AuxInt != 16 { 28513 break 28514 } 28515 v_0_0_0_0 := v_0_0_0.Args[0] 28516 if v_0_0_0_0.Op != OpAMD64NEGL { 28517 break 28518 } 28519 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 28520 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 28521 break 28522 } 28523 if v_0_0_0_0_0.AuxInt != -16 { 28524 break 28525 
} 28526 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 28527 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 28528 break 28529 } 28530 if v_0_0_0_0_0_0.AuxInt != 15 { 28531 break 28532 } 28533 y := v_0_0_0_0_0_0.Args[0] 28534 v_0_1 := v_0.Args[1] 28535 if v_0_1.Op != OpAMD64SHRW { 28536 break 28537 } 28538 _ = v_0_1.Args[1] 28539 x := v_0_1.Args[0] 28540 v_0_1_1 := v_0_1.Args[1] 28541 if v_0_1_1.Op != OpAMD64NEGL { 28542 break 28543 } 28544 v_0_1_1_0 := v_0_1_1.Args[0] 28545 if v_0_1_1_0.Op != OpAMD64ADDLconst { 28546 break 28547 } 28548 if v_0_1_1_0.AuxInt != -16 { 28549 break 28550 } 28551 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 28552 if v_0_1_1_0_0.Op != OpAMD64ANDLconst { 28553 break 28554 } 28555 if v_0_1_1_0_0.AuxInt != 15 { 28556 break 28557 } 28558 if y != v_0_1_1_0_0.Args[0] { 28559 break 28560 } 28561 v_1 := v.Args[1] 28562 if v_1.Op != OpAMD64SHLL { 28563 break 28564 } 28565 _ = v_1.Args[1] 28566 if x != v_1.Args[0] { 28567 break 28568 } 28569 v_1_1 := v_1.Args[1] 28570 if v_1_1.Op != OpAMD64ANDLconst { 28571 break 28572 } 28573 if v_1_1.AuxInt != 15 { 28574 break 28575 } 28576 if y != v_1_1.Args[0] { 28577 break 28578 } 28579 if !(v.Type.Size() == 2) { 28580 break 28581 } 28582 v.reset(OpAMD64ROLW) 28583 v.AddArg(x) 28584 v.AddArg(y) 28585 return true 28586 } 28587 // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) 28588 // cond: v.Type.Size() == 2 28589 // result: (RORW x y) 28590 for { 28591 _ = v.Args[1] 28592 v_0 := v.Args[0] 28593 if v_0.Op != OpAMD64SHRW { 28594 break 28595 } 28596 _ = v_0.Args[1] 28597 x := v_0.Args[0] 28598 v_0_1 := v_0.Args[1] 28599 if v_0_1.Op != OpAMD64ANDQconst { 28600 break 28601 } 28602 if v_0_1.AuxInt != 15 { 28603 break 28604 } 28605 y := v_0_1.Args[0] 28606 v_1 := v.Args[1] 28607 if v_1.Op != OpAMD64SHLL { 28608 break 28609 } 28610 _ = v_1.Args[1] 28611 if x != v_1.Args[0] { 28612 break 28613 } 28614 v_1_1 := v_1.Args[1] 28615 if v_1_1.Op != OpAMD64NEGQ { 28616 break 28617 } 28618 v_1_1_0 := v_1_1.Args[0] 28619 if v_1_1_0.Op != OpAMD64ADDQconst { 28620 break 28621 } 28622 if v_1_1_0.AuxInt != -16 { 28623 break 28624 } 28625 v_1_1_0_0 := v_1_1_0.Args[0] 28626 if v_1_1_0_0.Op != OpAMD64ANDQconst { 28627 break 28628 } 28629 if v_1_1_0_0.AuxInt != 15 { 28630 break 28631 } 28632 if y != v_1_1_0_0.Args[0] { 28633 break 28634 } 28635 if !(v.Type.Size() == 2) { 28636 break 28637 } 28638 v.reset(OpAMD64RORW) 28639 v.AddArg(x) 28640 v.AddArg(y) 28641 return true 28642 } 28643 // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15]))) 28644 // cond: v.Type.Size() == 2 28645 // result: (RORW x y) 28646 for { 28647 _ = v.Args[1] 28648 v_0 := v.Args[0] 28649 if v_0.Op != OpAMD64SHLL { 28650 break 28651 } 28652 _ = v_0.Args[1] 28653 x := v_0.Args[0] 28654 v_0_1 := v_0.Args[1] 28655 if v_0_1.Op != OpAMD64NEGQ { 28656 break 28657 } 28658 v_0_1_0 := v_0_1.Args[0] 28659 if v_0_1_0.Op != OpAMD64ADDQconst { 28660 break 28661 } 28662 if v_0_1_0.AuxInt != -16 { 28663 break 28664 } 28665 v_0_1_0_0 := v_0_1_0.Args[0] 28666 if v_0_1_0_0.Op != OpAMD64ANDQconst { 28667 break 28668 } 28669 if v_0_1_0_0.AuxInt != 15 { 28670 break 28671 } 28672 y := v_0_1_0_0.Args[0] 28673 v_1 := v.Args[1] 28674 if v_1.Op != OpAMD64SHRW { 28675 break 28676 } 28677 _ = v_1.Args[1] 28678 if x != v_1.Args[0] { 28679 break 28680 } 28681 v_1_1 := v_1.Args[1] 28682 if v_1_1.Op != OpAMD64ANDQconst { 28683 break 28684 } 28685 if v_1_1.AuxInt != 15 { 28686 break 28687 } 28688 if y != v_1_1.Args[0] { 28689 break 28690 } 28691 if 
!(v.Type.Size() == 2) { 28692 break 28693 } 28694 v.reset(OpAMD64RORW) 28695 v.AddArg(x) 28696 v.AddArg(y) 28697 return true 28698 } 28699 // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) 28700 // cond: v.Type.Size() == 2 28701 // result: (RORW x y) 28702 for { 28703 _ = v.Args[1] 28704 v_0 := v.Args[0] 28705 if v_0.Op != OpAMD64SHRW { 28706 break 28707 } 28708 _ = v_0.Args[1] 28709 x := v_0.Args[0] 28710 v_0_1 := v_0.Args[1] 28711 if v_0_1.Op != OpAMD64ANDLconst { 28712 break 28713 } 28714 if v_0_1.AuxInt != 15 { 28715 break 28716 } 28717 y := v_0_1.Args[0] 28718 v_1 := v.Args[1] 28719 if v_1.Op != OpAMD64SHLL { 28720 break 28721 } 28722 _ = v_1.Args[1] 28723 if x != v_1.Args[0] { 28724 break 28725 } 28726 v_1_1 := v_1.Args[1] 28727 if v_1_1.Op != OpAMD64NEGL { 28728 break 28729 } 28730 v_1_1_0 := v_1_1.Args[0] 28731 if v_1_1_0.Op != OpAMD64ADDLconst { 28732 break 28733 } 28734 if v_1_1_0.AuxInt != -16 { 28735 break 28736 } 28737 v_1_1_0_0 := v_1_1_0.Args[0] 28738 if v_1_1_0_0.Op != OpAMD64ANDLconst { 28739 break 28740 } 28741 if v_1_1_0_0.AuxInt != 15 { 28742 break 28743 } 28744 if y != v_1_1_0_0.Args[0] { 28745 break 28746 } 28747 if !(v.Type.Size() == 2) { 28748 break 28749 } 28750 v.reset(OpAMD64RORW) 28751 v.AddArg(x) 28752 v.AddArg(y) 28753 return true 28754 } 28755 // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15]))) 28756 // cond: v.Type.Size() == 2 28757 // result: (RORW x y) 28758 for { 28759 _ = v.Args[1] 28760 v_0 := v.Args[0] 28761 if v_0.Op != OpAMD64SHLL { 28762 break 28763 } 28764 _ = v_0.Args[1] 28765 x := v_0.Args[0] 28766 v_0_1 := v_0.Args[1] 28767 if v_0_1.Op != OpAMD64NEGL { 28768 break 28769 } 28770 v_0_1_0 := v_0_1.Args[0] 28771 if v_0_1_0.Op != OpAMD64ADDLconst { 28772 break 28773 } 28774 if v_0_1_0.AuxInt != -16 { 28775 break 28776 } 28777 v_0_1_0_0 := v_0_1_0.Args[0] 28778 if v_0_1_0_0.Op != OpAMD64ANDLconst { 28779 break 28780 } 28781 if v_0_1_0_0.AuxInt != 15 { 28782 break 28783 } 28784 y := v_0_1_0_0.Args[0] 28785 v_1 := v.Args[1] 28786 if v_1.Op != OpAMD64SHRW { 28787 break 28788 } 28789 _ = v_1.Args[1] 28790 if x != v_1.Args[0] { 28791 break 28792 } 28793 v_1_1 := v_1.Args[1] 28794 if v_1_1.Op != OpAMD64ANDLconst { 28795 break 28796 } 28797 if v_1_1.AuxInt != 15 { 28798 break 28799 } 28800 if y != v_1_1.Args[0] { 28801 break 28802 } 28803 if !(v.Type.Size() == 2) { 28804 break 28805 } 28806 v.reset(OpAMD64RORW) 28807 v.AddArg(x) 28808 v.AddArg(y) 28809 return true 28810 } 28811 return false 28812 } 28813 func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool { 28814 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])))) 28815 // cond: v.Type.Size() == 1 28816 // result: (ROLB x y) 28817 for { 28818 _ = v.Args[1] 28819 v_0 := v.Args[0] 28820 if v_0.Op != OpAMD64SHLL { 28821 break 28822 } 28823 _ = v_0.Args[1] 28824 x := v_0.Args[0] 28825 v_0_1 := v_0.Args[1] 28826 if v_0_1.Op != OpAMD64ANDQconst { 28827 break 28828 } 28829 if v_0_1.AuxInt != 7 { 28830 break 28831 } 28832 y := v_0_1.Args[0] 28833 v_1 := v.Args[1] 28834 if v_1.Op != OpAMD64ANDL { 28835 break 28836 } 28837 _ = v_1.Args[1] 28838 v_1_0 := v_1.Args[0] 28839 if v_1_0.Op != OpAMD64SHRB { 28840 break 28841 } 28842 _ = v_1_0.Args[1] 28843 if x != v_1_0.Args[0] { 28844 break 28845 } 28846 v_1_0_1 := v_1_0.Args[1] 28847 if v_1_0_1.Op != OpAMD64NEGQ { 28848 break 28849 } 28850 v_1_0_1_0 := 
v_1_0_1.Args[0] 28851 if v_1_0_1_0.Op != OpAMD64ADDQconst { 28852 break 28853 } 28854 if v_1_0_1_0.AuxInt != -8 { 28855 break 28856 } 28857 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 28858 if v_1_0_1_0_0.Op != OpAMD64ANDQconst { 28859 break 28860 } 28861 if v_1_0_1_0_0.AuxInt != 7 { 28862 break 28863 } 28864 if y != v_1_0_1_0_0.Args[0] { 28865 break 28866 } 28867 v_1_1 := v_1.Args[1] 28868 if v_1_1.Op != OpAMD64SBBLcarrymask { 28869 break 28870 } 28871 v_1_1_0 := v_1_1.Args[0] 28872 if v_1_1_0.Op != OpAMD64CMPQconst { 28873 break 28874 } 28875 if v_1_1_0.AuxInt != 8 { 28876 break 28877 } 28878 v_1_1_0_0 := v_1_1_0.Args[0] 28879 if v_1_1_0_0.Op != OpAMD64NEGQ { 28880 break 28881 } 28882 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 28883 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 28884 break 28885 } 28886 if v_1_1_0_0_0.AuxInt != -8 { 28887 break 28888 } 28889 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 28890 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 28891 break 28892 } 28893 if v_1_1_0_0_0_0.AuxInt != 7 { 28894 break 28895 } 28896 if y != v_1_1_0_0_0_0.Args[0] { 28897 break 28898 } 28899 if !(v.Type.Size() == 1) { 28900 break 28901 } 28902 v.reset(OpAMD64ROLB) 28903 v.AddArg(x) 28904 v.AddArg(y) 28905 return true 28906 } 28907 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))) 28908 // cond: v.Type.Size() == 1 28909 // result: (ROLB x y) 28910 for { 28911 _ = v.Args[1] 28912 v_0 := v.Args[0] 28913 if v_0.Op != OpAMD64SHLL { 28914 break 28915 } 28916 _ = v_0.Args[1] 28917 x := v_0.Args[0] 28918 v_0_1 := v_0.Args[1] 28919 if v_0_1.Op != OpAMD64ANDQconst { 28920 break 28921 } 28922 if v_0_1.AuxInt != 7 { 28923 break 28924 } 28925 y := v_0_1.Args[0] 28926 v_1 := v.Args[1] 28927 if v_1.Op != OpAMD64ANDL { 28928 break 28929 } 28930 _ = v_1.Args[1] 28931 v_1_0 := v_1.Args[0] 28932 if v_1_0.Op != OpAMD64SBBLcarrymask { 28933 break 28934 } 28935 v_1_0_0 := v_1_0.Args[0] 28936 if v_1_0_0.Op != OpAMD64CMPQconst { 28937 break 28938 } 28939 if v_1_0_0.AuxInt != 8 { 28940 break 28941 } 28942 v_1_0_0_0 := v_1_0_0.Args[0] 28943 if v_1_0_0_0.Op != OpAMD64NEGQ { 28944 break 28945 } 28946 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 28947 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 28948 break 28949 } 28950 if v_1_0_0_0_0.AuxInt != -8 { 28951 break 28952 } 28953 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 28954 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 28955 break 28956 } 28957 if v_1_0_0_0_0_0.AuxInt != 7 { 28958 break 28959 } 28960 if y != v_1_0_0_0_0_0.Args[0] { 28961 break 28962 } 28963 v_1_1 := v_1.Args[1] 28964 if v_1_1.Op != OpAMD64SHRB { 28965 break 28966 } 28967 _ = v_1_1.Args[1] 28968 if x != v_1_1.Args[0] { 28969 break 28970 } 28971 v_1_1_1 := v_1_1.Args[1] 28972 if v_1_1_1.Op != OpAMD64NEGQ { 28973 break 28974 } 28975 v_1_1_1_0 := v_1_1_1.Args[0] 28976 if v_1_1_1_0.Op != OpAMD64ADDQconst { 28977 break 28978 } 28979 if v_1_1_1_0.AuxInt != -8 { 28980 break 28981 } 28982 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 28983 if v_1_1_1_0_0.Op != OpAMD64ANDQconst { 28984 break 28985 } 28986 if v_1_1_1_0_0.AuxInt != 7 { 28987 break 28988 } 28989 if y != v_1_1_1_0_0.Args[0] { 28990 break 28991 } 28992 if !(v.Type.Size() == 1) { 28993 break 28994 } 28995 v.reset(OpAMD64ROLB) 28996 v.AddArg(x) 28997 v.AddArg(y) 28998 return true 28999 } 29000 // match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7]))) 29001 // cond: 
v.Type.Size() == 1 29002 // result: (ROLB x y) 29003 for { 29004 _ = v.Args[1] 29005 v_0 := v.Args[0] 29006 if v_0.Op != OpAMD64ANDL { 29007 break 29008 } 29009 _ = v_0.Args[1] 29010 v_0_0 := v_0.Args[0] 29011 if v_0_0.Op != OpAMD64SHRB { 29012 break 29013 } 29014 _ = v_0_0.Args[1] 29015 x := v_0_0.Args[0] 29016 v_0_0_1 := v_0_0.Args[1] 29017 if v_0_0_1.Op != OpAMD64NEGQ { 29018 break 29019 } 29020 v_0_0_1_0 := v_0_0_1.Args[0] 29021 if v_0_0_1_0.Op != OpAMD64ADDQconst { 29022 break 29023 } 29024 if v_0_0_1_0.AuxInt != -8 { 29025 break 29026 } 29027 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 29028 if v_0_0_1_0_0.Op != OpAMD64ANDQconst { 29029 break 29030 } 29031 if v_0_0_1_0_0.AuxInt != 7 { 29032 break 29033 } 29034 y := v_0_0_1_0_0.Args[0] 29035 v_0_1 := v_0.Args[1] 29036 if v_0_1.Op != OpAMD64SBBLcarrymask { 29037 break 29038 } 29039 v_0_1_0 := v_0_1.Args[0] 29040 if v_0_1_0.Op != OpAMD64CMPQconst { 29041 break 29042 } 29043 if v_0_1_0.AuxInt != 8 { 29044 break 29045 } 29046 v_0_1_0_0 := v_0_1_0.Args[0] 29047 if v_0_1_0_0.Op != OpAMD64NEGQ { 29048 break 29049 } 29050 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 29051 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 29052 break 29053 } 29054 if v_0_1_0_0_0.AuxInt != -8 { 29055 break 29056 } 29057 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 29058 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 29059 break 29060 } 29061 if v_0_1_0_0_0_0.AuxInt != 7 { 29062 break 29063 } 29064 if y != v_0_1_0_0_0_0.Args[0] { 29065 break 29066 } 29067 v_1 := v.Args[1] 29068 if v_1.Op != OpAMD64SHLL { 29069 break 29070 } 29071 _ = v_1.Args[1] 29072 if x != v_1.Args[0] { 29073 break 29074 } 29075 v_1_1 := v_1.Args[1] 29076 if v_1_1.Op != OpAMD64ANDQconst { 29077 break 29078 } 29079 if v_1_1.AuxInt != 7 { 29080 break 29081 } 29082 if y != v_1_1.Args[0] { 29083 break 29084 } 29085 if !(v.Type.Size() == 1) { 29086 break 29087 } 29088 v.reset(OpAMD64ROLB) 29089 v.AddArg(x) 29090 v.AddArg(y) 29091 return true 29092 } 29093 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7]))) 29094 // cond: v.Type.Size() == 1 29095 // result: (ROLB x y) 29096 for { 29097 _ = v.Args[1] 29098 v_0 := v.Args[0] 29099 if v_0.Op != OpAMD64ANDL { 29100 break 29101 } 29102 _ = v_0.Args[1] 29103 v_0_0 := v_0.Args[0] 29104 if v_0_0.Op != OpAMD64SBBLcarrymask { 29105 break 29106 } 29107 v_0_0_0 := v_0_0.Args[0] 29108 if v_0_0_0.Op != OpAMD64CMPQconst { 29109 break 29110 } 29111 if v_0_0_0.AuxInt != 8 { 29112 break 29113 } 29114 v_0_0_0_0 := v_0_0_0.Args[0] 29115 if v_0_0_0_0.Op != OpAMD64NEGQ { 29116 break 29117 } 29118 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 29119 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 29120 break 29121 } 29122 if v_0_0_0_0_0.AuxInt != -8 { 29123 break 29124 } 29125 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 29126 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 29127 break 29128 } 29129 if v_0_0_0_0_0_0.AuxInt != 7 { 29130 break 29131 } 29132 y := v_0_0_0_0_0_0.Args[0] 29133 v_0_1 := v_0.Args[1] 29134 if v_0_1.Op != OpAMD64SHRB { 29135 break 29136 } 29137 _ = v_0_1.Args[1] 29138 x := v_0_1.Args[0] 29139 v_0_1_1 := v_0_1.Args[1] 29140 if v_0_1_1.Op != OpAMD64NEGQ { 29141 break 29142 } 29143 v_0_1_1_0 := v_0_1_1.Args[0] 29144 if v_0_1_1_0.Op != OpAMD64ADDQconst { 29145 break 29146 } 29147 if v_0_1_1_0.AuxInt != -8 { 29148 break 29149 } 29150 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 29151 if v_0_1_1_0_0.Op != OpAMD64ANDQconst { 29152 break 29153 } 29154 if v_0_1_1_0_0.AuxInt != 7 { 29155 break 29156 } 29157 if y != 
v_0_1_1_0_0.Args[0] { 29158 break 29159 } 29160 v_1 := v.Args[1] 29161 if v_1.Op != OpAMD64SHLL { 29162 break 29163 } 29164 _ = v_1.Args[1] 29165 if x != v_1.Args[0] { 29166 break 29167 } 29168 v_1_1 := v_1.Args[1] 29169 if v_1_1.Op != OpAMD64ANDQconst { 29170 break 29171 } 29172 if v_1_1.AuxInt != 7 { 29173 break 29174 } 29175 if y != v_1_1.Args[0] { 29176 break 29177 } 29178 if !(v.Type.Size() == 1) { 29179 break 29180 } 29181 v.reset(OpAMD64ROLB) 29182 v.AddArg(x) 29183 v.AddArg(y) 29184 return true 29185 } 29186 // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])))) 29187 // cond: v.Type.Size() == 1 29188 // result: (ROLB x y) 29189 for { 29190 _ = v.Args[1] 29191 v_0 := v.Args[0] 29192 if v_0.Op != OpAMD64SHLL { 29193 break 29194 } 29195 _ = v_0.Args[1] 29196 x := v_0.Args[0] 29197 v_0_1 := v_0.Args[1] 29198 if v_0_1.Op != OpAMD64ANDLconst { 29199 break 29200 } 29201 if v_0_1.AuxInt != 7 { 29202 break 29203 } 29204 y := v_0_1.Args[0] 29205 v_1 := v.Args[1] 29206 if v_1.Op != OpAMD64ANDL { 29207 break 29208 } 29209 _ = v_1.Args[1] 29210 v_1_0 := v_1.Args[0] 29211 if v_1_0.Op != OpAMD64SHRB { 29212 break 29213 } 29214 _ = v_1_0.Args[1] 29215 if x != v_1_0.Args[0] { 29216 break 29217 } 29218 v_1_0_1 := v_1_0.Args[1] 29219 if v_1_0_1.Op != OpAMD64NEGL { 29220 break 29221 } 29222 v_1_0_1_0 := v_1_0_1.Args[0] 29223 if v_1_0_1_0.Op != OpAMD64ADDLconst { 29224 break 29225 } 29226 if v_1_0_1_0.AuxInt != -8 { 29227 break 29228 } 29229 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 29230 if v_1_0_1_0_0.Op != OpAMD64ANDLconst { 29231 break 29232 } 29233 if v_1_0_1_0_0.AuxInt != 7 { 29234 break 29235 } 29236 if y != v_1_0_1_0_0.Args[0] { 29237 break 29238 } 29239 v_1_1 := v_1.Args[1] 29240 if v_1_1.Op != OpAMD64SBBLcarrymask { 29241 break 29242 } 29243 v_1_1_0 := v_1_1.Args[0] 29244 if v_1_1_0.Op != OpAMD64CMPLconst { 29245 break 29246 } 29247 if v_1_1_0.AuxInt != 8 { 29248 break 29249 } 29250 v_1_1_0_0 := v_1_1_0.Args[0] 29251 if v_1_1_0_0.Op != OpAMD64NEGL { 29252 break 29253 } 29254 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 29255 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 29256 break 29257 } 29258 if v_1_1_0_0_0.AuxInt != -8 { 29259 break 29260 } 29261 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 29262 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 29263 break 29264 } 29265 if v_1_1_0_0_0_0.AuxInt != 7 { 29266 break 29267 } 29268 if y != v_1_1_0_0_0_0.Args[0] { 29269 break 29270 } 29271 if !(v.Type.Size() == 1) { 29272 break 29273 } 29274 v.reset(OpAMD64ROLB) 29275 v.AddArg(x) 29276 v.AddArg(y) 29277 return true 29278 } 29279 // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))) 29280 // cond: v.Type.Size() == 1 29281 // result: (ROLB x y) 29282 for { 29283 _ = v.Args[1] 29284 v_0 := v.Args[0] 29285 if v_0.Op != OpAMD64SHLL { 29286 break 29287 } 29288 _ = v_0.Args[1] 29289 x := v_0.Args[0] 29290 v_0_1 := v_0.Args[1] 29291 if v_0_1.Op != OpAMD64ANDLconst { 29292 break 29293 } 29294 if v_0_1.AuxInt != 7 { 29295 break 29296 } 29297 y := v_0_1.Args[0] 29298 v_1 := v.Args[1] 29299 if v_1.Op != OpAMD64ANDL { 29300 break 29301 } 29302 _ = v_1.Args[1] 29303 v_1_0 := v_1.Args[0] 29304 if v_1_0.Op != OpAMD64SBBLcarrymask { 29305 break 29306 } 29307 v_1_0_0 := v_1_0.Args[0] 29308 if v_1_0_0.Op != OpAMD64CMPLconst { 29309 break 29310 } 29311 if v_1_0_0.AuxInt != 8 { 29312 break 29313 } 29314 
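// Note (editorial): these rules are the 8-bit instance of the same rotate
// recognition: shift counts masked with [7], offsets of [-8], a CMP..const [8]
// carrymask guard, and a v.Type.Size() == 1 condition before rewriting to ROLB
// (the RORB rules follow). The assumed source idiom, by analogy with the wider
// cases:
//
//	// rol8 rotates a uint8 left by y bits.
//	func rol8(x uint8, y uint) uint8 {
//		return x<<(y&7) | x>>(8-y&7)
//	}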
v_1_0_0_0 := v_1_0_0.Args[0] 29315 if v_1_0_0_0.Op != OpAMD64NEGL { 29316 break 29317 } 29318 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 29319 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 29320 break 29321 } 29322 if v_1_0_0_0_0.AuxInt != -8 { 29323 break 29324 } 29325 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 29326 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 29327 break 29328 } 29329 if v_1_0_0_0_0_0.AuxInt != 7 { 29330 break 29331 } 29332 if y != v_1_0_0_0_0_0.Args[0] { 29333 break 29334 } 29335 v_1_1 := v_1.Args[1] 29336 if v_1_1.Op != OpAMD64SHRB { 29337 break 29338 } 29339 _ = v_1_1.Args[1] 29340 if x != v_1_1.Args[0] { 29341 break 29342 } 29343 v_1_1_1 := v_1_1.Args[1] 29344 if v_1_1_1.Op != OpAMD64NEGL { 29345 break 29346 } 29347 v_1_1_1_0 := v_1_1_1.Args[0] 29348 if v_1_1_1_0.Op != OpAMD64ADDLconst { 29349 break 29350 } 29351 if v_1_1_1_0.AuxInt != -8 { 29352 break 29353 } 29354 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 29355 if v_1_1_1_0_0.Op != OpAMD64ANDLconst { 29356 break 29357 } 29358 if v_1_1_1_0_0.AuxInt != 7 { 29359 break 29360 } 29361 if y != v_1_1_1_0_0.Args[0] { 29362 break 29363 } 29364 if !(v.Type.Size() == 1) { 29365 break 29366 } 29367 v.reset(OpAMD64ROLB) 29368 v.AddArg(x) 29369 v.AddArg(y) 29370 return true 29371 } 29372 // match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7]))) 29373 // cond: v.Type.Size() == 1 29374 // result: (ROLB x y) 29375 for { 29376 _ = v.Args[1] 29377 v_0 := v.Args[0] 29378 if v_0.Op != OpAMD64ANDL { 29379 break 29380 } 29381 _ = v_0.Args[1] 29382 v_0_0 := v_0.Args[0] 29383 if v_0_0.Op != OpAMD64SHRB { 29384 break 29385 } 29386 _ = v_0_0.Args[1] 29387 x := v_0_0.Args[0] 29388 v_0_0_1 := v_0_0.Args[1] 29389 if v_0_0_1.Op != OpAMD64NEGL { 29390 break 29391 } 29392 v_0_0_1_0 := v_0_0_1.Args[0] 29393 if v_0_0_1_0.Op != OpAMD64ADDLconst { 29394 break 29395 } 29396 if v_0_0_1_0.AuxInt != -8 { 29397 break 29398 } 29399 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 29400 if v_0_0_1_0_0.Op != OpAMD64ANDLconst { 29401 break 29402 } 29403 if v_0_0_1_0_0.AuxInt != 7 { 29404 break 29405 } 29406 y := v_0_0_1_0_0.Args[0] 29407 v_0_1 := v_0.Args[1] 29408 if v_0_1.Op != OpAMD64SBBLcarrymask { 29409 break 29410 } 29411 v_0_1_0 := v_0_1.Args[0] 29412 if v_0_1_0.Op != OpAMD64CMPLconst { 29413 break 29414 } 29415 if v_0_1_0.AuxInt != 8 { 29416 break 29417 } 29418 v_0_1_0_0 := v_0_1_0.Args[0] 29419 if v_0_1_0_0.Op != OpAMD64NEGL { 29420 break 29421 } 29422 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 29423 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 29424 break 29425 } 29426 if v_0_1_0_0_0.AuxInt != -8 { 29427 break 29428 } 29429 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 29430 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 29431 break 29432 } 29433 if v_0_1_0_0_0_0.AuxInt != 7 { 29434 break 29435 } 29436 if y != v_0_1_0_0_0_0.Args[0] { 29437 break 29438 } 29439 v_1 := v.Args[1] 29440 if v_1.Op != OpAMD64SHLL { 29441 break 29442 } 29443 _ = v_1.Args[1] 29444 if x != v_1.Args[0] { 29445 break 29446 } 29447 v_1_1 := v_1.Args[1] 29448 if v_1_1.Op != OpAMD64ANDLconst { 29449 break 29450 } 29451 if v_1_1.AuxInt != 7 { 29452 break 29453 } 29454 if y != v_1_1.Args[0] { 29455 break 29456 } 29457 if !(v.Type.Size() == 1) { 29458 break 29459 } 29460 v.reset(OpAMD64ROLB) 29461 v.AddArg(x) 29462 v.AddArg(y) 29463 return true 29464 } 29465 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7]))) 29466 
// cond: v.Type.Size() == 1 29467 // result: (ROLB x y) 29468 for { 29469 _ = v.Args[1] 29470 v_0 := v.Args[0] 29471 if v_0.Op != OpAMD64ANDL { 29472 break 29473 } 29474 _ = v_0.Args[1] 29475 v_0_0 := v_0.Args[0] 29476 if v_0_0.Op != OpAMD64SBBLcarrymask { 29477 break 29478 } 29479 v_0_0_0 := v_0_0.Args[0] 29480 if v_0_0_0.Op != OpAMD64CMPLconst { 29481 break 29482 } 29483 if v_0_0_0.AuxInt != 8 { 29484 break 29485 } 29486 v_0_0_0_0 := v_0_0_0.Args[0] 29487 if v_0_0_0_0.Op != OpAMD64NEGL { 29488 break 29489 } 29490 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 29491 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 29492 break 29493 } 29494 if v_0_0_0_0_0.AuxInt != -8 { 29495 break 29496 } 29497 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 29498 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 29499 break 29500 } 29501 if v_0_0_0_0_0_0.AuxInt != 7 { 29502 break 29503 } 29504 y := v_0_0_0_0_0_0.Args[0] 29505 v_0_1 := v_0.Args[1] 29506 if v_0_1.Op != OpAMD64SHRB { 29507 break 29508 } 29509 _ = v_0_1.Args[1] 29510 x := v_0_1.Args[0] 29511 v_0_1_1 := v_0_1.Args[1] 29512 if v_0_1_1.Op != OpAMD64NEGL { 29513 break 29514 } 29515 v_0_1_1_0 := v_0_1_1.Args[0] 29516 if v_0_1_1_0.Op != OpAMD64ADDLconst { 29517 break 29518 } 29519 if v_0_1_1_0.AuxInt != -8 { 29520 break 29521 } 29522 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 29523 if v_0_1_1_0_0.Op != OpAMD64ANDLconst { 29524 break 29525 } 29526 if v_0_1_1_0_0.AuxInt != 7 { 29527 break 29528 } 29529 if y != v_0_1_1_0_0.Args[0] { 29530 break 29531 } 29532 v_1 := v.Args[1] 29533 if v_1.Op != OpAMD64SHLL { 29534 break 29535 } 29536 _ = v_1.Args[1] 29537 if x != v_1.Args[0] { 29538 break 29539 } 29540 v_1_1 := v_1.Args[1] 29541 if v_1_1.Op != OpAMD64ANDLconst { 29542 break 29543 } 29544 if v_1_1.AuxInt != 7 { 29545 break 29546 } 29547 if y != v_1_1.Args[0] { 29548 break 29549 } 29550 if !(v.Type.Size() == 1) { 29551 break 29552 } 29553 v.reset(OpAMD64ROLB) 29554 v.AddArg(x) 29555 v.AddArg(y) 29556 return true 29557 } 29558 // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) 29559 // cond: v.Type.Size() == 1 29560 // result: (RORB x y) 29561 for { 29562 _ = v.Args[1] 29563 v_0 := v.Args[0] 29564 if v_0.Op != OpAMD64SHRB { 29565 break 29566 } 29567 _ = v_0.Args[1] 29568 x := v_0.Args[0] 29569 v_0_1 := v_0.Args[1] 29570 if v_0_1.Op != OpAMD64ANDQconst { 29571 break 29572 } 29573 if v_0_1.AuxInt != 7 { 29574 break 29575 } 29576 y := v_0_1.Args[0] 29577 v_1 := v.Args[1] 29578 if v_1.Op != OpAMD64SHLL { 29579 break 29580 } 29581 _ = v_1.Args[1] 29582 if x != v_1.Args[0] { 29583 break 29584 } 29585 v_1_1 := v_1.Args[1] 29586 if v_1_1.Op != OpAMD64NEGQ { 29587 break 29588 } 29589 v_1_1_0 := v_1_1.Args[0] 29590 if v_1_1_0.Op != OpAMD64ADDQconst { 29591 break 29592 } 29593 if v_1_1_0.AuxInt != -8 { 29594 break 29595 } 29596 v_1_1_0_0 := v_1_1_0.Args[0] 29597 if v_1_1_0_0.Op != OpAMD64ANDQconst { 29598 break 29599 } 29600 if v_1_1_0_0.AuxInt != 7 { 29601 break 29602 } 29603 if y != v_1_1_0_0.Args[0] { 29604 break 29605 } 29606 if !(v.Type.Size() == 1) { 29607 break 29608 } 29609 v.reset(OpAMD64RORB) 29610 v.AddArg(x) 29611 v.AddArg(y) 29612 return true 29613 } 29614 // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7]))) 29615 // cond: v.Type.Size() == 1 29616 // result: (RORB x y) 29617 for { 29618 _ = v.Args[1] 29619 v_0 := v.Args[0] 29620 if v_0.Op != OpAMD64SHLL { 29621 break 29622 } 29623 _ = v_0.Args[1] 29624 x := v_0.Args[0] 29625 v_0_1 := v_0.Args[1] 29626 if v_0_1.Op != OpAMD64NEGQ { 29627 break 29628 } 29629 v_0_1_0 
:= v_0_1.Args[0] 29630 if v_0_1_0.Op != OpAMD64ADDQconst { 29631 break 29632 } 29633 if v_0_1_0.AuxInt != -8 { 29634 break 29635 } 29636 v_0_1_0_0 := v_0_1_0.Args[0] 29637 if v_0_1_0_0.Op != OpAMD64ANDQconst { 29638 break 29639 } 29640 if v_0_1_0_0.AuxInt != 7 { 29641 break 29642 } 29643 y := v_0_1_0_0.Args[0] 29644 v_1 := v.Args[1] 29645 if v_1.Op != OpAMD64SHRB { 29646 break 29647 } 29648 _ = v_1.Args[1] 29649 if x != v_1.Args[0] { 29650 break 29651 } 29652 v_1_1 := v_1.Args[1] 29653 if v_1_1.Op != OpAMD64ANDQconst { 29654 break 29655 } 29656 if v_1_1.AuxInt != 7 { 29657 break 29658 } 29659 if y != v_1_1.Args[0] { 29660 break 29661 } 29662 if !(v.Type.Size() == 1) { 29663 break 29664 } 29665 v.reset(OpAMD64RORB) 29666 v.AddArg(x) 29667 v.AddArg(y) 29668 return true 29669 } 29670 return false 29671 } 29672 func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { 29673 b := v.Block 29674 _ = b 29675 typ := &b.Func.Config.Types 29676 _ = typ 29677 // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) 29678 // cond: v.Type.Size() == 1 29679 // result: (RORB x y) 29680 for { 29681 _ = v.Args[1] 29682 v_0 := v.Args[0] 29683 if v_0.Op != OpAMD64SHRB { 29684 break 29685 } 29686 _ = v_0.Args[1] 29687 x := v_0.Args[0] 29688 v_0_1 := v_0.Args[1] 29689 if v_0_1.Op != OpAMD64ANDLconst { 29690 break 29691 } 29692 if v_0_1.AuxInt != 7 { 29693 break 29694 } 29695 y := v_0_1.Args[0] 29696 v_1 := v.Args[1] 29697 if v_1.Op != OpAMD64SHLL { 29698 break 29699 } 29700 _ = v_1.Args[1] 29701 if x != v_1.Args[0] { 29702 break 29703 } 29704 v_1_1 := v_1.Args[1] 29705 if v_1_1.Op != OpAMD64NEGL { 29706 break 29707 } 29708 v_1_1_0 := v_1_1.Args[0] 29709 if v_1_1_0.Op != OpAMD64ADDLconst { 29710 break 29711 } 29712 if v_1_1_0.AuxInt != -8 { 29713 break 29714 } 29715 v_1_1_0_0 := v_1_1_0.Args[0] 29716 if v_1_1_0_0.Op != OpAMD64ANDLconst { 29717 break 29718 } 29719 if v_1_1_0_0.AuxInt != 7 { 29720 break 29721 } 29722 if y != v_1_1_0_0.Args[0] { 29723 break 29724 } 29725 if !(v.Type.Size() == 1) { 29726 break 29727 } 29728 v.reset(OpAMD64RORB) 29729 v.AddArg(x) 29730 v.AddArg(y) 29731 return true 29732 } 29733 // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7]))) 29734 // cond: v.Type.Size() == 1 29735 // result: (RORB x y) 29736 for { 29737 _ = v.Args[1] 29738 v_0 := v.Args[0] 29739 if v_0.Op != OpAMD64SHLL { 29740 break 29741 } 29742 _ = v_0.Args[1] 29743 x := v_0.Args[0] 29744 v_0_1 := v_0.Args[1] 29745 if v_0_1.Op != OpAMD64NEGL { 29746 break 29747 } 29748 v_0_1_0 := v_0_1.Args[0] 29749 if v_0_1_0.Op != OpAMD64ADDLconst { 29750 break 29751 } 29752 if v_0_1_0.AuxInt != -8 { 29753 break 29754 } 29755 v_0_1_0_0 := v_0_1_0.Args[0] 29756 if v_0_1_0_0.Op != OpAMD64ANDLconst { 29757 break 29758 } 29759 if v_0_1_0_0.AuxInt != 7 { 29760 break 29761 } 29762 y := v_0_1_0_0.Args[0] 29763 v_1 := v.Args[1] 29764 if v_1.Op != OpAMD64SHRB { 29765 break 29766 } 29767 _ = v_1.Args[1] 29768 if x != v_1.Args[0] { 29769 break 29770 } 29771 v_1_1 := v_1.Args[1] 29772 if v_1_1.Op != OpAMD64ANDLconst { 29773 break 29774 } 29775 if v_1_1.AuxInt != 7 { 29776 break 29777 } 29778 if y != v_1_1.Args[0] { 29779 break 29780 } 29781 if !(v.Type.Size() == 1) { 29782 break 29783 } 29784 v.reset(OpAMD64RORB) 29785 v.AddArg(x) 29786 v.AddArg(y) 29787 return true 29788 } 29789 // match: (ORL x x) 29790 // cond: 29791 // result: x 29792 for { 29793 _ = v.Args[1] 29794 x := v.Args[0] 29795 if x != v.Args[1] { 29796 break 29797 } 29798 v.reset(OpCopy) 29799 v.Type = x.Type 
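// Note (editorial): rules whose result is an existing value, like the
// (ORL x x) -> x rule here, cannot simply return x: v is rewritten in place
// into an OpCopy of x, with its type pinned to x's type, and a later pass
// eliminates the copy. This appears to be the generator's uniform convention
// for value-replacing rules.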
29800 v.AddArg(x) 29801 return true 29802 } 29803 // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) 29804 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29805 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 29806 for { 29807 _ = v.Args[1] 29808 x0 := v.Args[0] 29809 if x0.Op != OpAMD64MOVBload { 29810 break 29811 } 29812 i0 := x0.AuxInt 29813 s := x0.Aux 29814 _ = x0.Args[1] 29815 p := x0.Args[0] 29816 mem := x0.Args[1] 29817 sh := v.Args[1] 29818 if sh.Op != OpAMD64SHLLconst { 29819 break 29820 } 29821 if sh.AuxInt != 8 { 29822 break 29823 } 29824 x1 := sh.Args[0] 29825 if x1.Op != OpAMD64MOVBload { 29826 break 29827 } 29828 i1 := x1.AuxInt 29829 if x1.Aux != s { 29830 break 29831 } 29832 _ = x1.Args[1] 29833 if p != x1.Args[0] { 29834 break 29835 } 29836 if mem != x1.Args[1] { 29837 break 29838 } 29839 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29840 break 29841 } 29842 b = mergePoint(b, x0, x1) 29843 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29844 v.reset(OpCopy) 29845 v.AddArg(v0) 29846 v0.AuxInt = i0 29847 v0.Aux = s 29848 v0.AddArg(p) 29849 v0.AddArg(mem) 29850 return true 29851 } 29852 // match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 29853 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29854 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 29855 for { 29856 _ = v.Args[1] 29857 sh := v.Args[0] 29858 if sh.Op != OpAMD64SHLLconst { 29859 break 29860 } 29861 if sh.AuxInt != 8 { 29862 break 29863 } 29864 x1 := sh.Args[0] 29865 if x1.Op != OpAMD64MOVBload { 29866 break 29867 } 29868 i1 := x1.AuxInt 29869 s := x1.Aux 29870 _ = x1.Args[1] 29871 p := x1.Args[0] 29872 mem := x1.Args[1] 29873 x0 := v.Args[1] 29874 if x0.Op != OpAMD64MOVBload { 29875 break 29876 } 29877 i0 := x0.AuxInt 29878 if x0.Aux != s { 29879 break 29880 } 29881 _ = x0.Args[1] 29882 if p != x0.Args[0] { 29883 break 29884 } 29885 if mem != x0.Args[1] { 29886 break 29887 } 29888 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29889 break 29890 } 29891 b = mergePoint(b, x0, x1) 29892 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29893 v.reset(OpCopy) 29894 v.AddArg(v0) 29895 v0.AuxInt = i0 29896 v0.Aux = s 29897 v0.AddArg(p) 29898 v0.AddArg(mem) 29899 return true 29900 } 29901 // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) 29902 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29903 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 29904 for { 29905 _ = v.Args[1] 29906 x0 := v.Args[0] 29907 if x0.Op != OpAMD64MOVWload { 29908 break 29909 } 29910 i0 := x0.AuxInt 29911 s := x0.Aux 29912 _ = x0.Args[1] 29913 p := x0.Args[0] 29914 mem := x0.Args[1] 29915 sh := v.Args[1] 29916 if sh.Op != OpAMD64SHLLconst { 29917 break 29918 } 29919 if sh.AuxInt != 16 { 29920 break 29921 } 29922 x1 := sh.Args[0] 29923 if x1.Op != OpAMD64MOVWload { 29924 break 29925 } 29926 i1 := x1.AuxInt 29927 if x1.Aux != s { 29928 break 29929 } 29930 _ = x1.Args[1] 29931 if p != x1.Args[0] { 29932 break 29933 
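// Note (editorial): the rules in this region fuse two adjacent narrow loads
// OR'd together into one wider load: MOVBload pairs become a MOVWload and
// MOVWload pairs become a MOVLload. The conditions require adjacency
// (i1 == i0+1 or i0+2), single uses, and a non-nil mergePoint block so the
// combined load can be placed where both original loads are available; the
// clobber calls mark the old values as dead. The merge is valid because amd64
// is little-endian. The SHLLconst variants further on (j1 == j0+8,
// j0 % 16 == 0) perform the same merge for shifted byte loads buried inside
// longer OR chains. A sketch of source code this serves — assumed, not taken
// from this file:
//
//	package main
//
//	import "fmt"
//
//	// load16 reads a little-endian uint16. The two byte loads, the
//	// SHLLconst [8], and the ORL match the pattern and collapse into a
//	// single 16-bit load.
//	func load16(b []byte) uint16 {
//		_ = b[1] // bounds-check hint
//		return uint16(b[0]) | uint16(b[1])<<8
//	}
//
//	func main() { fmt.Printf("%#x\n", load16([]byte{0x34, 0x12})) } // 0x1234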
} 29934 if mem != x1.Args[1] { 29935 break 29936 } 29937 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29938 break 29939 } 29940 b = mergePoint(b, x0, x1) 29941 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29942 v.reset(OpCopy) 29943 v.AddArg(v0) 29944 v0.AuxInt = i0 29945 v0.Aux = s 29946 v0.AddArg(p) 29947 v0.AddArg(mem) 29948 return true 29949 } 29950 // match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 29951 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29952 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 29953 for { 29954 _ = v.Args[1] 29955 sh := v.Args[0] 29956 if sh.Op != OpAMD64SHLLconst { 29957 break 29958 } 29959 if sh.AuxInt != 16 { 29960 break 29961 } 29962 x1 := sh.Args[0] 29963 if x1.Op != OpAMD64MOVWload { 29964 break 29965 } 29966 i1 := x1.AuxInt 29967 s := x1.Aux 29968 _ = x1.Args[1] 29969 p := x1.Args[0] 29970 mem := x1.Args[1] 29971 x0 := v.Args[1] 29972 if x0.Op != OpAMD64MOVWload { 29973 break 29974 } 29975 i0 := x0.AuxInt 29976 if x0.Aux != s { 29977 break 29978 } 29979 _ = x0.Args[1] 29980 if p != x0.Args[0] { 29981 break 29982 } 29983 if mem != x0.Args[1] { 29984 break 29985 } 29986 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29987 break 29988 } 29989 b = mergePoint(b, x0, x1) 29990 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29991 v.reset(OpCopy) 29992 v.AddArg(v0) 29993 v0.AuxInt = i0 29994 v0.Aux = s 29995 v0.AddArg(p) 29996 v0.AddArg(mem) 29997 return true 29998 } 29999 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 30000 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30001 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 30002 for { 30003 _ = v.Args[1] 30004 s1 := v.Args[0] 30005 if s1.Op != OpAMD64SHLLconst { 30006 break 30007 } 30008 j1 := s1.AuxInt 30009 x1 := s1.Args[0] 30010 if x1.Op != OpAMD64MOVBload { 30011 break 30012 } 30013 i1 := x1.AuxInt 30014 s := x1.Aux 30015 _ = x1.Args[1] 30016 p := x1.Args[0] 30017 mem := x1.Args[1] 30018 or := v.Args[1] 30019 if or.Op != OpAMD64ORL { 30020 break 30021 } 30022 _ = or.Args[1] 30023 s0 := or.Args[0] 30024 if s0.Op != OpAMD64SHLLconst { 30025 break 30026 } 30027 j0 := s0.AuxInt 30028 x0 := s0.Args[0] 30029 if x0.Op != OpAMD64MOVBload { 30030 break 30031 } 30032 i0 := x0.AuxInt 30033 if x0.Aux != s { 30034 break 30035 } 30036 _ = x0.Args[1] 30037 if p != x0.Args[0] { 30038 break 30039 } 30040 if mem != x0.Args[1] { 30041 break 30042 } 30043 y := or.Args[1] 30044 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30045 break 30046 } 30047 b = mergePoint(b, x0, x1) 30048 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 30049 v.reset(OpCopy) 30050 v.AddArg(v0) 30051 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 30052 v1.AuxInt = j0 30053 v2 := b.NewValue0(v.Pos, 
OpAMD64MOVWload, typ.UInt16) 30054 v2.AuxInt = i0 30055 v2.Aux = s 30056 v2.AddArg(p) 30057 v2.AddArg(mem) 30058 v1.AddArg(v2) 30059 v0.AddArg(v1) 30060 v0.AddArg(y) 30061 return true 30062 } 30063 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))) 30064 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30065 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 30066 for { 30067 _ = v.Args[1] 30068 s1 := v.Args[0] 30069 if s1.Op != OpAMD64SHLLconst { 30070 break 30071 } 30072 j1 := s1.AuxInt 30073 x1 := s1.Args[0] 30074 if x1.Op != OpAMD64MOVBload { 30075 break 30076 } 30077 i1 := x1.AuxInt 30078 s := x1.Aux 30079 _ = x1.Args[1] 30080 p := x1.Args[0] 30081 mem := x1.Args[1] 30082 or := v.Args[1] 30083 if or.Op != OpAMD64ORL { 30084 break 30085 } 30086 _ = or.Args[1] 30087 y := or.Args[0] 30088 s0 := or.Args[1] 30089 if s0.Op != OpAMD64SHLLconst { 30090 break 30091 } 30092 j0 := s0.AuxInt 30093 x0 := s0.Args[0] 30094 if x0.Op != OpAMD64MOVBload { 30095 break 30096 } 30097 i0 := x0.AuxInt 30098 if x0.Aux != s { 30099 break 30100 } 30101 _ = x0.Args[1] 30102 if p != x0.Args[0] { 30103 break 30104 } 30105 if mem != x0.Args[1] { 30106 break 30107 } 30108 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30109 break 30110 } 30111 b = mergePoint(b, x0, x1) 30112 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 30113 v.reset(OpCopy) 30114 v.AddArg(v0) 30115 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 30116 v1.AuxInt = j0 30117 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 30118 v2.AuxInt = i0 30119 v2.Aux = s 30120 v2.AddArg(p) 30121 v2.AddArg(mem) 30122 v1.AddArg(v2) 30123 v0.AddArg(v1) 30124 v0.AddArg(y) 30125 return true 30126 } 30127 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 30128 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30129 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 30130 for { 30131 _ = v.Args[1] 30132 or := v.Args[0] 30133 if or.Op != OpAMD64ORL { 30134 break 30135 } 30136 _ = or.Args[1] 30137 s0 := or.Args[0] 30138 if s0.Op != OpAMD64SHLLconst { 30139 break 30140 } 30141 j0 := s0.AuxInt 30142 x0 := s0.Args[0] 30143 if x0.Op != OpAMD64MOVBload { 30144 break 30145 } 30146 i0 := x0.AuxInt 30147 s := x0.Aux 30148 _ = x0.Args[1] 30149 p := x0.Args[0] 30150 mem := x0.Args[1] 30151 y := or.Args[1] 30152 s1 := v.Args[1] 30153 if s1.Op != OpAMD64SHLLconst { 30154 break 30155 } 30156 j1 := s1.AuxInt 30157 x1 := s1.Args[0] 30158 if x1.Op != OpAMD64MOVBload { 30159 break 30160 } 30161 i1 := x1.AuxInt 30162 if x1.Aux != s { 30163 break 30164 } 30165 _ = x1.Args[1] 30166 if p != x1.Args[0] { 30167 break 30168 } 30169 if mem != x1.Args[1] { 30170 break 30171 } 30172 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && 
or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30173 break 30174 } 30175 b = mergePoint(b, x0, x1) 30176 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 30177 v.reset(OpCopy) 30178 v.AddArg(v0) 30179 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 30180 v1.AuxInt = j0 30181 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 30182 v2.AuxInt = i0 30183 v2.Aux = s 30184 v2.AddArg(p) 30185 v2.AddArg(mem) 30186 v1.AddArg(v2) 30187 v0.AddArg(v1) 30188 v0.AddArg(y) 30189 return true 30190 } 30191 return false 30192 } 30193 func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool { 30194 b := v.Block 30195 _ = b 30196 typ := &b.Func.Config.Types 30197 _ = typ 30198 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 30199 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30200 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 30201 for { 30202 _ = v.Args[1] 30203 or := v.Args[0] 30204 if or.Op != OpAMD64ORL { 30205 break 30206 } 30207 _ = or.Args[1] 30208 y := or.Args[0] 30209 s0 := or.Args[1] 30210 if s0.Op != OpAMD64SHLLconst { 30211 break 30212 } 30213 j0 := s0.AuxInt 30214 x0 := s0.Args[0] 30215 if x0.Op != OpAMD64MOVBload { 30216 break 30217 } 30218 i0 := x0.AuxInt 30219 s := x0.Aux 30220 _ = x0.Args[1] 30221 p := x0.Args[0] 30222 mem := x0.Args[1] 30223 s1 := v.Args[1] 30224 if s1.Op != OpAMD64SHLLconst { 30225 break 30226 } 30227 j1 := s1.AuxInt 30228 x1 := s1.Args[0] 30229 if x1.Op != OpAMD64MOVBload { 30230 break 30231 } 30232 i1 := x1.AuxInt 30233 if x1.Aux != s { 30234 break 30235 } 30236 _ = x1.Args[1] 30237 if p != x1.Args[0] { 30238 break 30239 } 30240 if mem != x1.Args[1] { 30241 break 30242 } 30243 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30244 break 30245 } 30246 b = mergePoint(b, x0, x1) 30247 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 30248 v.reset(OpCopy) 30249 v.AddArg(v0) 30250 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 30251 v1.AuxInt = j0 30252 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 30253 v2.AuxInt = i0 30254 v2.Aux = s 30255 v2.AddArg(p) 30256 v2.AddArg(mem) 30257 v1.AddArg(v2) 30258 v0.AddArg(v1) 30259 v0.AddArg(y) 30260 return true 30261 } 30262 // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 30263 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30264 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30265 for { 30266 _ = v.Args[1] 30267 x0 := v.Args[0] 30268 if x0.Op != OpAMD64MOVBloadidx1 { 30269 break 30270 } 30271 i0 := x0.AuxInt 30272 s := x0.Aux 30273 _ = x0.Args[2] 30274 p := x0.Args[0] 30275 idx := x0.Args[1] 30276 mem := x0.Args[2] 30277 sh := v.Args[1] 30278 if sh.Op != OpAMD64SHLLconst { 30279 break 30280 } 30281 if sh.AuxInt != 8 { 30282 break 30283 } 30284 x1 := sh.Args[0] 30285 if x1.Op != OpAMD64MOVBloadidx1 { 30286 break 30287 } 30288 i1 := x1.AuxInt 30289 if x1.Aux != s { 30290 break 
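// Note (editorial): the MOVBloadidx1 rules repeat the merge above for indexed
// loads. Since (p idx) and (idx p) address the same memory, and ORL itself is
// commutative, the generator emits every operand ordering explicitly, which is
// why the same rule body recurs with p and idx swapped. An assumed
// source-level shape:
//
//	// load16At reads a little-endian uint16 at offset i.
//	func load16At(b []byte, i int) uint16 {
//		return uint16(b[i]) | uint16(b[i+1])<<8
//	}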
30291 } 30292 _ = x1.Args[2] 30293 if p != x1.Args[0] { 30294 break 30295 } 30296 if idx != x1.Args[1] { 30297 break 30298 } 30299 if mem != x1.Args[2] { 30300 break 30301 } 30302 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30303 break 30304 } 30305 b = mergePoint(b, x0, x1) 30306 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 30307 v.reset(OpCopy) 30308 v.AddArg(v0) 30309 v0.AuxInt = i0 30310 v0.Aux = s 30311 v0.AddArg(p) 30312 v0.AddArg(idx) 30313 v0.AddArg(mem) 30314 return true 30315 } 30316 // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 30317 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30318 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30319 for { 30320 _ = v.Args[1] 30321 x0 := v.Args[0] 30322 if x0.Op != OpAMD64MOVBloadidx1 { 30323 break 30324 } 30325 i0 := x0.AuxInt 30326 s := x0.Aux 30327 _ = x0.Args[2] 30328 idx := x0.Args[0] 30329 p := x0.Args[1] 30330 mem := x0.Args[2] 30331 sh := v.Args[1] 30332 if sh.Op != OpAMD64SHLLconst { 30333 break 30334 } 30335 if sh.AuxInt != 8 { 30336 break 30337 } 30338 x1 := sh.Args[0] 30339 if x1.Op != OpAMD64MOVBloadidx1 { 30340 break 30341 } 30342 i1 := x1.AuxInt 30343 if x1.Aux != s { 30344 break 30345 } 30346 _ = x1.Args[2] 30347 if p != x1.Args[0] { 30348 break 30349 } 30350 if idx != x1.Args[1] { 30351 break 30352 } 30353 if mem != x1.Args[2] { 30354 break 30355 } 30356 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30357 break 30358 } 30359 b = mergePoint(b, x0, x1) 30360 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 30361 v.reset(OpCopy) 30362 v.AddArg(v0) 30363 v0.AuxInt = i0 30364 v0.Aux = s 30365 v0.AddArg(p) 30366 v0.AddArg(idx) 30367 v0.AddArg(mem) 30368 return true 30369 } 30370 // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 30371 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30372 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30373 for { 30374 _ = v.Args[1] 30375 x0 := v.Args[0] 30376 if x0.Op != OpAMD64MOVBloadidx1 { 30377 break 30378 } 30379 i0 := x0.AuxInt 30380 s := x0.Aux 30381 _ = x0.Args[2] 30382 p := x0.Args[0] 30383 idx := x0.Args[1] 30384 mem := x0.Args[2] 30385 sh := v.Args[1] 30386 if sh.Op != OpAMD64SHLLconst { 30387 break 30388 } 30389 if sh.AuxInt != 8 { 30390 break 30391 } 30392 x1 := sh.Args[0] 30393 if x1.Op != OpAMD64MOVBloadidx1 { 30394 break 30395 } 30396 i1 := x1.AuxInt 30397 if x1.Aux != s { 30398 break 30399 } 30400 _ = x1.Args[2] 30401 if idx != x1.Args[0] { 30402 break 30403 } 30404 if p != x1.Args[1] { 30405 break 30406 } 30407 if mem != x1.Args[2] { 30408 break 30409 } 30410 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30411 break 30412 } 30413 b = mergePoint(b, x0, x1) 30414 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 30415 v.reset(OpCopy) 30416 v.AddArg(v0) 30417 v0.AuxInt = i0 30418 v0.Aux = s 30419 v0.AddArg(p) 30420 v0.AddArg(idx) 30421 v0.AddArg(mem) 30422 return true 30423 } 30424 // match: (ORL x0:(MOVBloadidx1 
[i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 30425 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30426 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30427 for { 30428 _ = v.Args[1] 30429 x0 := v.Args[0] 30430 if x0.Op != OpAMD64MOVBloadidx1 { 30431 break 30432 } 30433 i0 := x0.AuxInt 30434 s := x0.Aux 30435 _ = x0.Args[2] 30436 idx := x0.Args[0] 30437 p := x0.Args[1] 30438 mem := x0.Args[2] 30439 sh := v.Args[1] 30440 if sh.Op != OpAMD64SHLLconst { 30441 break 30442 } 30443 if sh.AuxInt != 8 { 30444 break 30445 } 30446 x1 := sh.Args[0] 30447 if x1.Op != OpAMD64MOVBloadidx1 { 30448 break 30449 } 30450 i1 := x1.AuxInt 30451 if x1.Aux != s { 30452 break 30453 } 30454 _ = x1.Args[2] 30455 if idx != x1.Args[0] { 30456 break 30457 } 30458 if p != x1.Args[1] { 30459 break 30460 } 30461 if mem != x1.Args[2] { 30462 break 30463 } 30464 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30465 break 30466 } 30467 b = mergePoint(b, x0, x1) 30468 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 30469 v.reset(OpCopy) 30470 v.AddArg(v0) 30471 v0.AuxInt = i0 30472 v0.Aux = s 30473 v0.AddArg(p) 30474 v0.AddArg(idx) 30475 v0.AddArg(mem) 30476 return true 30477 } 30478 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 30479 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30480 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30481 for { 30482 _ = v.Args[1] 30483 sh := v.Args[0] 30484 if sh.Op != OpAMD64SHLLconst { 30485 break 30486 } 30487 if sh.AuxInt != 8 { 30488 break 30489 } 30490 x1 := sh.Args[0] 30491 if x1.Op != OpAMD64MOVBloadidx1 { 30492 break 30493 } 30494 i1 := x1.AuxInt 30495 s := x1.Aux 30496 _ = x1.Args[2] 30497 p := x1.Args[0] 30498 idx := x1.Args[1] 30499 mem := x1.Args[2] 30500 x0 := v.Args[1] 30501 if x0.Op != OpAMD64MOVBloadidx1 { 30502 break 30503 } 30504 i0 := x0.AuxInt 30505 if x0.Aux != s { 30506 break 30507 } 30508 _ = x0.Args[2] 30509 if p != x0.Args[0] { 30510 break 30511 } 30512 if idx != x0.Args[1] { 30513 break 30514 } 30515 if mem != x0.Args[2] { 30516 break 30517 } 30518 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30519 break 30520 } 30521 b = mergePoint(b, x0, x1) 30522 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 30523 v.reset(OpCopy) 30524 v.AddArg(v0) 30525 v0.AuxInt = i0 30526 v0.Aux = s 30527 v0.AddArg(p) 30528 v0.AddArg(idx) 30529 v0.AddArg(mem) 30530 return true 30531 } 30532 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 30533 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30534 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30535 for { 30536 _ = v.Args[1] 30537 sh := v.Args[0] 30538 if sh.Op != OpAMD64SHLLconst { 30539 break 30540 } 30541 if sh.AuxInt != 8 { 30542 break 30543 } 30544 x1 := sh.Args[0] 30545 if x1.Op != OpAMD64MOVBloadidx1 { 30546 break 30547 } 30548 i1 := x1.AuxInt 30549 s := x1.Aux 30550 _ = x1.Args[2] 30551 idx := x1.Args[0] 30552 p 
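// NOTE (annotation, not generated code). MOVBloadidx1 addresses p+idx, and the two address
// operands are interchangeable, so the generator emits a near-identical block for each
// p/idx ordering and for both operand orders of the commutative ORL; the eight variants
// around this point differ only in those choices. The condition helpers come from
// rewrite.go in this package: mergePoint(b, x0, x1) picks a block where the merged load can
// safely be placed (nil means no such block exists, which vetoes the rewrite), and clobber
// is roughly the following (a sketch, not a verbatim copy):
//
//	func clobber(v *Value) bool {
//		v.reset(OpInvalid) // mark the matched value dead; deadcode removes it
//		return true        // always true, so it composes inside && conditions
//	}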
:= x1.Args[1] 30553 mem := x1.Args[2] 30554 x0 := v.Args[1] 30555 if x0.Op != OpAMD64MOVBloadidx1 { 30556 break 30557 } 30558 i0 := x0.AuxInt 30559 if x0.Aux != s { 30560 break 30561 } 30562 _ = x0.Args[2] 30563 if p != x0.Args[0] { 30564 break 30565 } 30566 if idx != x0.Args[1] { 30567 break 30568 } 30569 if mem != x0.Args[2] { 30570 break 30571 } 30572 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30573 break 30574 } 30575 b = mergePoint(b, x0, x1) 30576 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 30577 v.reset(OpCopy) 30578 v.AddArg(v0) 30579 v0.AuxInt = i0 30580 v0.Aux = s 30581 v0.AddArg(p) 30582 v0.AddArg(idx) 30583 v0.AddArg(mem) 30584 return true 30585 } 30586 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 30587 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30588 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30589 for { 30590 _ = v.Args[1] 30591 sh := v.Args[0] 30592 if sh.Op != OpAMD64SHLLconst { 30593 break 30594 } 30595 if sh.AuxInt != 8 { 30596 break 30597 } 30598 x1 := sh.Args[0] 30599 if x1.Op != OpAMD64MOVBloadidx1 { 30600 break 30601 } 30602 i1 := x1.AuxInt 30603 s := x1.Aux 30604 _ = x1.Args[2] 30605 p := x1.Args[0] 30606 idx := x1.Args[1] 30607 mem := x1.Args[2] 30608 x0 := v.Args[1] 30609 if x0.Op != OpAMD64MOVBloadidx1 { 30610 break 30611 } 30612 i0 := x0.AuxInt 30613 if x0.Aux != s { 30614 break 30615 } 30616 _ = x0.Args[2] 30617 if idx != x0.Args[0] { 30618 break 30619 } 30620 if p != x0.Args[1] { 30621 break 30622 } 30623 if mem != x0.Args[2] { 30624 break 30625 } 30626 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30627 break 30628 } 30629 b = mergePoint(b, x0, x1) 30630 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 30631 v.reset(OpCopy) 30632 v.AddArg(v0) 30633 v0.AuxInt = i0 30634 v0.Aux = s 30635 v0.AddArg(p) 30636 v0.AddArg(idx) 30637 v0.AddArg(mem) 30638 return true 30639 } 30640 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 30641 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30642 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 30643 for { 30644 _ = v.Args[1] 30645 sh := v.Args[0] 30646 if sh.Op != OpAMD64SHLLconst { 30647 break 30648 } 30649 if sh.AuxInt != 8 { 30650 break 30651 } 30652 x1 := sh.Args[0] 30653 if x1.Op != OpAMD64MOVBloadidx1 { 30654 break 30655 } 30656 i1 := x1.AuxInt 30657 s := x1.Aux 30658 _ = x1.Args[2] 30659 idx := x1.Args[0] 30660 p := x1.Args[1] 30661 mem := x1.Args[2] 30662 x0 := v.Args[1] 30663 if x0.Op != OpAMD64MOVBloadidx1 { 30664 break 30665 } 30666 i0 := x0.AuxInt 30667 if x0.Aux != s { 30668 break 30669 } 30670 _ = x0.Args[2] 30671 if idx != x0.Args[0] { 30672 break 30673 } 30674 if p != x0.Args[1] { 30675 break 30676 } 30677 if mem != x0.Args[2] { 30678 break 30679 } 30680 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30681 break 30682 } 30683 b = mergePoint(b, x0, x1) 30684 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 30685 v.reset(OpCopy) 30686 
v.AddArg(v0) 30687 v0.AuxInt = i0 30688 v0.Aux = s 30689 v0.AddArg(p) 30690 v0.AddArg(idx) 30691 v0.AddArg(mem) 30692 return true 30693 } 30694 // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 30695 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30696 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 30697 for { 30698 _ = v.Args[1] 30699 x0 := v.Args[0] 30700 if x0.Op != OpAMD64MOVWloadidx1 { 30701 break 30702 } 30703 i0 := x0.AuxInt 30704 s := x0.Aux 30705 _ = x0.Args[2] 30706 p := x0.Args[0] 30707 idx := x0.Args[1] 30708 mem := x0.Args[2] 30709 sh := v.Args[1] 30710 if sh.Op != OpAMD64SHLLconst { 30711 break 30712 } 30713 if sh.AuxInt != 16 { 30714 break 30715 } 30716 x1 := sh.Args[0] 30717 if x1.Op != OpAMD64MOVWloadidx1 { 30718 break 30719 } 30720 i1 := x1.AuxInt 30721 if x1.Aux != s { 30722 break 30723 } 30724 _ = x1.Args[2] 30725 if p != x1.Args[0] { 30726 break 30727 } 30728 if idx != x1.Args[1] { 30729 break 30730 } 30731 if mem != x1.Args[2] { 30732 break 30733 } 30734 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30735 break 30736 } 30737 b = mergePoint(b, x0, x1) 30738 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30739 v.reset(OpCopy) 30740 v.AddArg(v0) 30741 v0.AuxInt = i0 30742 v0.Aux = s 30743 v0.AddArg(p) 30744 v0.AddArg(idx) 30745 v0.AddArg(mem) 30746 return true 30747 } 30748 return false 30749 } 30750 func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool { 30751 b := v.Block 30752 _ = b 30753 typ := &b.Func.Config.Types 30754 _ = typ 30755 // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 30756 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30757 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 30758 for { 30759 _ = v.Args[1] 30760 x0 := v.Args[0] 30761 if x0.Op != OpAMD64MOVWloadidx1 { 30762 break 30763 } 30764 i0 := x0.AuxInt 30765 s := x0.Aux 30766 _ = x0.Args[2] 30767 idx := x0.Args[0] 30768 p := x0.Args[1] 30769 mem := x0.Args[2] 30770 sh := v.Args[1] 30771 if sh.Op != OpAMD64SHLLconst { 30772 break 30773 } 30774 if sh.AuxInt != 16 { 30775 break 30776 } 30777 x1 := sh.Args[0] 30778 if x1.Op != OpAMD64MOVWloadidx1 { 30779 break 30780 } 30781 i1 := x1.AuxInt 30782 if x1.Aux != s { 30783 break 30784 } 30785 _ = x1.Args[2] 30786 if p != x1.Args[0] { 30787 break 30788 } 30789 if idx != x1.Args[1] { 30790 break 30791 } 30792 if mem != x1.Args[2] { 30793 break 30794 } 30795 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30796 break 30797 } 30798 b = mergePoint(b, x0, x1) 30799 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30800 v.reset(OpCopy) 30801 v.AddArg(v0) 30802 v0.AuxInt = i0 30803 v0.Aux = s 30804 v0.AddArg(p) 30805 v0.AddArg(idx) 30806 v0.AddArg(mem) 30807 return true 30808 } 30809 // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 30810 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30811 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 30812 
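// NOTE (annotation, not generated code). The indexed byte-pair rules above merge
// MOVBloadidx1 at i0 and i0+1 (the latter shifted by 8) into one MOVWloadidx1 at i0; the
// rules that follow lift the same idea one level, merging two adjacent 16-bit indexed
// loads (offsets i0 and i0+2, shift 16) into one MOVLloadidx1. Sketch of the 32-bit
// identity, reusing the hypothetical le16 helper from the earlier note:
//
//	// uint32(le16(b, i)) | uint32(le16(b, i+2))<<16 == le32(b, i)
//	func le32(b []byte, i int) uint32 {
//		return uint32(le16(b, i)) | uint32(le16(b, i+2))<<16 // one MOVLloadidx1
//	}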
for { 30813 _ = v.Args[1] 30814 x0 := v.Args[0] 30815 if x0.Op != OpAMD64MOVWloadidx1 { 30816 break 30817 } 30818 i0 := x0.AuxInt 30819 s := x0.Aux 30820 _ = x0.Args[2] 30821 p := x0.Args[0] 30822 idx := x0.Args[1] 30823 mem := x0.Args[2] 30824 sh := v.Args[1] 30825 if sh.Op != OpAMD64SHLLconst { 30826 break 30827 } 30828 if sh.AuxInt != 16 { 30829 break 30830 } 30831 x1 := sh.Args[0] 30832 if x1.Op != OpAMD64MOVWloadidx1 { 30833 break 30834 } 30835 i1 := x1.AuxInt 30836 if x1.Aux != s { 30837 break 30838 } 30839 _ = x1.Args[2] 30840 if idx != x1.Args[0] { 30841 break 30842 } 30843 if p != x1.Args[1] { 30844 break 30845 } 30846 if mem != x1.Args[2] { 30847 break 30848 } 30849 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30850 break 30851 } 30852 b = mergePoint(b, x0, x1) 30853 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30854 v.reset(OpCopy) 30855 v.AddArg(v0) 30856 v0.AuxInt = i0 30857 v0.Aux = s 30858 v0.AddArg(p) 30859 v0.AddArg(idx) 30860 v0.AddArg(mem) 30861 return true 30862 } 30863 // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 30864 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30865 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 30866 for { 30867 _ = v.Args[1] 30868 x0 := v.Args[0] 30869 if x0.Op != OpAMD64MOVWloadidx1 { 30870 break 30871 } 30872 i0 := x0.AuxInt 30873 s := x0.Aux 30874 _ = x0.Args[2] 30875 idx := x0.Args[0] 30876 p := x0.Args[1] 30877 mem := x0.Args[2] 30878 sh := v.Args[1] 30879 if sh.Op != OpAMD64SHLLconst { 30880 break 30881 } 30882 if sh.AuxInt != 16 { 30883 break 30884 } 30885 x1 := sh.Args[0] 30886 if x1.Op != OpAMD64MOVWloadidx1 { 30887 break 30888 } 30889 i1 := x1.AuxInt 30890 if x1.Aux != s { 30891 break 30892 } 30893 _ = x1.Args[2] 30894 if idx != x1.Args[0] { 30895 break 30896 } 30897 if p != x1.Args[1] { 30898 break 30899 } 30900 if mem != x1.Args[2] { 30901 break 30902 } 30903 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30904 break 30905 } 30906 b = mergePoint(b, x0, x1) 30907 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30908 v.reset(OpCopy) 30909 v.AddArg(v0) 30910 v0.AuxInt = i0 30911 v0.Aux = s 30912 v0.AddArg(p) 30913 v0.AddArg(idx) 30914 v0.AddArg(mem) 30915 return true 30916 } 30917 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 30918 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30919 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 30920 for { 30921 _ = v.Args[1] 30922 sh := v.Args[0] 30923 if sh.Op != OpAMD64SHLLconst { 30924 break 30925 } 30926 if sh.AuxInt != 16 { 30927 break 30928 } 30929 x1 := sh.Args[0] 30930 if x1.Op != OpAMD64MOVWloadidx1 { 30931 break 30932 } 30933 i1 := x1.AuxInt 30934 s := x1.Aux 30935 _ = x1.Args[2] 30936 p := x1.Args[0] 30937 idx := x1.Args[1] 30938 mem := x1.Args[2] 30939 x0 := v.Args[1] 30940 if x0.Op != OpAMD64MOVWloadidx1 { 30941 break 30942 } 30943 i0 := x0.AuxInt 30944 if x0.Aux != s { 30945 break 30946 } 30947 _ = x0.Args[2] 30948 if p != x0.Args[0] { 30949 break 30950 } 30951 if idx != x0.Args[1] { 30952 break 30953 } 30954 if mem != 
x0.Args[2] { 30955 break 30956 } 30957 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30958 break 30959 } 30960 b = mergePoint(b, x0, x1) 30961 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30962 v.reset(OpCopy) 30963 v.AddArg(v0) 30964 v0.AuxInt = i0 30965 v0.Aux = s 30966 v0.AddArg(p) 30967 v0.AddArg(idx) 30968 v0.AddArg(mem) 30969 return true 30970 } 30971 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 30972 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30973 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 30974 for { 30975 _ = v.Args[1] 30976 sh := v.Args[0] 30977 if sh.Op != OpAMD64SHLLconst { 30978 break 30979 } 30980 if sh.AuxInt != 16 { 30981 break 30982 } 30983 x1 := sh.Args[0] 30984 if x1.Op != OpAMD64MOVWloadidx1 { 30985 break 30986 } 30987 i1 := x1.AuxInt 30988 s := x1.Aux 30989 _ = x1.Args[2] 30990 idx := x1.Args[0] 30991 p := x1.Args[1] 30992 mem := x1.Args[2] 30993 x0 := v.Args[1] 30994 if x0.Op != OpAMD64MOVWloadidx1 { 30995 break 30996 } 30997 i0 := x0.AuxInt 30998 if x0.Aux != s { 30999 break 31000 } 31001 _ = x0.Args[2] 31002 if p != x0.Args[0] { 31003 break 31004 } 31005 if idx != x0.Args[1] { 31006 break 31007 } 31008 if mem != x0.Args[2] { 31009 break 31010 } 31011 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 31012 break 31013 } 31014 b = mergePoint(b, x0, x1) 31015 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31016 v.reset(OpCopy) 31017 v.AddArg(v0) 31018 v0.AuxInt = i0 31019 v0.Aux = s 31020 v0.AddArg(p) 31021 v0.AddArg(idx) 31022 v0.AddArg(mem) 31023 return true 31024 } 31025 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 31026 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 31027 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 31028 for { 31029 _ = v.Args[1] 31030 sh := v.Args[0] 31031 if sh.Op != OpAMD64SHLLconst { 31032 break 31033 } 31034 if sh.AuxInt != 16 { 31035 break 31036 } 31037 x1 := sh.Args[0] 31038 if x1.Op != OpAMD64MOVWloadidx1 { 31039 break 31040 } 31041 i1 := x1.AuxInt 31042 s := x1.Aux 31043 _ = x1.Args[2] 31044 p := x1.Args[0] 31045 idx := x1.Args[1] 31046 mem := x1.Args[2] 31047 x0 := v.Args[1] 31048 if x0.Op != OpAMD64MOVWloadidx1 { 31049 break 31050 } 31051 i0 := x0.AuxInt 31052 if x0.Aux != s { 31053 break 31054 } 31055 _ = x0.Args[2] 31056 if idx != x0.Args[0] { 31057 break 31058 } 31059 if p != x0.Args[1] { 31060 break 31061 } 31062 if mem != x0.Args[2] { 31063 break 31064 } 31065 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 31066 break 31067 } 31068 b = mergePoint(b, x0, x1) 31069 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31070 v.reset(OpCopy) 31071 v.AddArg(v0) 31072 v0.AuxInt = i0 31073 v0.Aux = s 31074 v0.AddArg(p) 31075 v0.AddArg(idx) 31076 v0.AddArg(mem) 31077 return true 31078 } 31079 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 31080 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && 
sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 31081 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 31082 for { 31083 _ = v.Args[1] 31084 sh := v.Args[0] 31085 if sh.Op != OpAMD64SHLLconst { 31086 break 31087 } 31088 if sh.AuxInt != 16 { 31089 break 31090 } 31091 x1 := sh.Args[0] 31092 if x1.Op != OpAMD64MOVWloadidx1 { 31093 break 31094 } 31095 i1 := x1.AuxInt 31096 s := x1.Aux 31097 _ = x1.Args[2] 31098 idx := x1.Args[0] 31099 p := x1.Args[1] 31100 mem := x1.Args[2] 31101 x0 := v.Args[1] 31102 if x0.Op != OpAMD64MOVWloadidx1 { 31103 break 31104 } 31105 i0 := x0.AuxInt 31106 if x0.Aux != s { 31107 break 31108 } 31109 _ = x0.Args[2] 31110 if idx != x0.Args[0] { 31111 break 31112 } 31113 if p != x0.Args[1] { 31114 break 31115 } 31116 if mem != x0.Args[2] { 31117 break 31118 } 31119 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 31120 break 31121 } 31122 b = mergePoint(b, x0, x1) 31123 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31124 v.reset(OpCopy) 31125 v.AddArg(v0) 31126 v0.AuxInt = i0 31127 v0.Aux = s 31128 v0.AddArg(p) 31129 v0.AddArg(idx) 31130 v0.AddArg(mem) 31131 return true 31132 } 31133 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 31134 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31135 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31136 for { 31137 _ = v.Args[1] 31138 s1 := v.Args[0] 31139 if s1.Op != OpAMD64SHLLconst { 31140 break 31141 } 31142 j1 := s1.AuxInt 31143 x1 := s1.Args[0] 31144 if x1.Op != OpAMD64MOVBloadidx1 { 31145 break 31146 } 31147 i1 := x1.AuxInt 31148 s := x1.Aux 31149 _ = x1.Args[2] 31150 p := x1.Args[0] 31151 idx := x1.Args[1] 31152 mem := x1.Args[2] 31153 or := v.Args[1] 31154 if or.Op != OpAMD64ORL { 31155 break 31156 } 31157 _ = or.Args[1] 31158 s0 := or.Args[0] 31159 if s0.Op != OpAMD64SHLLconst { 31160 break 31161 } 31162 j0 := s0.AuxInt 31163 x0 := s0.Args[0] 31164 if x0.Op != OpAMD64MOVBloadidx1 { 31165 break 31166 } 31167 i0 := x0.AuxInt 31168 if x0.Aux != s { 31169 break 31170 } 31171 _ = x0.Args[2] 31172 if p != x0.Args[0] { 31173 break 31174 } 31175 if idx != x0.Args[1] { 31176 break 31177 } 31178 if mem != x0.Args[2] { 31179 break 31180 } 31181 y := or.Args[1] 31182 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31183 break 31184 } 31185 b = mergePoint(b, x0, x1) 31186 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31187 v.reset(OpCopy) 31188 v.AddArg(v0) 31189 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31190 v1.AuxInt = j0 31191 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31192 v2.AuxInt = i0 31193 v2.Aux = s 31194 v2.AddArg(p) 31195 v2.AddArg(idx) 31196 v2.AddArg(mem) 31197 v1.AddArg(v2) 31198 v0.AddArg(v1) 31199 v0.AddArg(y) 31200 return true 31201 } 31202 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 31203 // cond: i1 == i0+1 && j1 == 
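// NOTE (annotation, not generated code). From here the OR-chain form returns for indexed
// loads: two shifted MOVBloadidx1 values inside a larger ORL chain collapse to one shifted
// MOVWloadidx1, with the unrelated operand y carried through unchanged. Chain shape
// (illustrative; le16 as in the first note):
//
//	func chain(y uint32, b []byte, i int, j uint) uint32 {
//		// before: y | uint32(b[i])<<j | uint32(b[i+1])<<(j+8)
//		return y | uint32(le16(b, i))<<j // after: one 16-bit load plus one shift
//	}
//
// With two p/idx orderings per load and two operand orders at each of the two commutative
// ORLs, sixteen permutations of this rule follow, spanning ORL_70 through ORL_90.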
j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31204 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31205 for { 31206 _ = v.Args[1] 31207 s1 := v.Args[0] 31208 if s1.Op != OpAMD64SHLLconst { 31209 break 31210 } 31211 j1 := s1.AuxInt 31212 x1 := s1.Args[0] 31213 if x1.Op != OpAMD64MOVBloadidx1 { 31214 break 31215 } 31216 i1 := x1.AuxInt 31217 s := x1.Aux 31218 _ = x1.Args[2] 31219 idx := x1.Args[0] 31220 p := x1.Args[1] 31221 mem := x1.Args[2] 31222 or := v.Args[1] 31223 if or.Op != OpAMD64ORL { 31224 break 31225 } 31226 _ = or.Args[1] 31227 s0 := or.Args[0] 31228 if s0.Op != OpAMD64SHLLconst { 31229 break 31230 } 31231 j0 := s0.AuxInt 31232 x0 := s0.Args[0] 31233 if x0.Op != OpAMD64MOVBloadidx1 { 31234 break 31235 } 31236 i0 := x0.AuxInt 31237 if x0.Aux != s { 31238 break 31239 } 31240 _ = x0.Args[2] 31241 if p != x0.Args[0] { 31242 break 31243 } 31244 if idx != x0.Args[1] { 31245 break 31246 } 31247 if mem != x0.Args[2] { 31248 break 31249 } 31250 y := or.Args[1] 31251 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31252 break 31253 } 31254 b = mergePoint(b, x0, x1) 31255 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31256 v.reset(OpCopy) 31257 v.AddArg(v0) 31258 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31259 v1.AuxInt = j0 31260 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31261 v2.AuxInt = i0 31262 v2.Aux = s 31263 v2.AddArg(p) 31264 v2.AddArg(idx) 31265 v2.AddArg(mem) 31266 v1.AddArg(v2) 31267 v0.AddArg(v1) 31268 v0.AddArg(y) 31269 return true 31270 } 31271 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 31272 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31273 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31274 for { 31275 _ = v.Args[1] 31276 s1 := v.Args[0] 31277 if s1.Op != OpAMD64SHLLconst { 31278 break 31279 } 31280 j1 := s1.AuxInt 31281 x1 := s1.Args[0] 31282 if x1.Op != OpAMD64MOVBloadidx1 { 31283 break 31284 } 31285 i1 := x1.AuxInt 31286 s := x1.Aux 31287 _ = x1.Args[2] 31288 p := x1.Args[0] 31289 idx := x1.Args[1] 31290 mem := x1.Args[2] 31291 or := v.Args[1] 31292 if or.Op != OpAMD64ORL { 31293 break 31294 } 31295 _ = or.Args[1] 31296 s0 := or.Args[0] 31297 if s0.Op != OpAMD64SHLLconst { 31298 break 31299 } 31300 j0 := s0.AuxInt 31301 x0 := s0.Args[0] 31302 if x0.Op != OpAMD64MOVBloadidx1 { 31303 break 31304 } 31305 i0 := x0.AuxInt 31306 if x0.Aux != s { 31307 break 31308 } 31309 _ = x0.Args[2] 31310 if idx != x0.Args[0] { 31311 break 31312 } 31313 if p != x0.Args[1] { 31314 break 31315 } 31316 if mem != x0.Args[2] { 31317 break 31318 } 31319 y := or.Args[1] 31320 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31321 break 31322 } 
31323 b = mergePoint(b, x0, x1) 31324 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31325 v.reset(OpCopy) 31326 v.AddArg(v0) 31327 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31328 v1.AuxInt = j0 31329 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31330 v2.AuxInt = i0 31331 v2.Aux = s 31332 v2.AddArg(p) 31333 v2.AddArg(idx) 31334 v2.AddArg(mem) 31335 v1.AddArg(v2) 31336 v0.AddArg(v1) 31337 v0.AddArg(y) 31338 return true 31339 } 31340 return false 31341 } 31342 func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool { 31343 b := v.Block 31344 _ = b 31345 typ := &b.Func.Config.Types 31346 _ = typ 31347 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 31348 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31349 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31350 for { 31351 _ = v.Args[1] 31352 s1 := v.Args[0] 31353 if s1.Op != OpAMD64SHLLconst { 31354 break 31355 } 31356 j1 := s1.AuxInt 31357 x1 := s1.Args[0] 31358 if x1.Op != OpAMD64MOVBloadidx1 { 31359 break 31360 } 31361 i1 := x1.AuxInt 31362 s := x1.Aux 31363 _ = x1.Args[2] 31364 idx := x1.Args[0] 31365 p := x1.Args[1] 31366 mem := x1.Args[2] 31367 or := v.Args[1] 31368 if or.Op != OpAMD64ORL { 31369 break 31370 } 31371 _ = or.Args[1] 31372 s0 := or.Args[0] 31373 if s0.Op != OpAMD64SHLLconst { 31374 break 31375 } 31376 j0 := s0.AuxInt 31377 x0 := s0.Args[0] 31378 if x0.Op != OpAMD64MOVBloadidx1 { 31379 break 31380 } 31381 i0 := x0.AuxInt 31382 if x0.Aux != s { 31383 break 31384 } 31385 _ = x0.Args[2] 31386 if idx != x0.Args[0] { 31387 break 31388 } 31389 if p != x0.Args[1] { 31390 break 31391 } 31392 if mem != x0.Args[2] { 31393 break 31394 } 31395 y := or.Args[1] 31396 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31397 break 31398 } 31399 b = mergePoint(b, x0, x1) 31400 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31401 v.reset(OpCopy) 31402 v.AddArg(v0) 31403 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31404 v1.AuxInt = j0 31405 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31406 v2.AuxInt = i0 31407 v2.Aux = s 31408 v2.AddArg(p) 31409 v2.AddArg(idx) 31410 v2.AddArg(mem) 31411 v1.AddArg(v2) 31412 v0.AddArg(v1) 31413 v0.AddArg(y) 31414 return true 31415 } 31416 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 31417 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31418 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31419 for { 31420 _ = v.Args[1] 31421 s1 := v.Args[0] 31422 if s1.Op != OpAMD64SHLLconst { 31423 break 31424 } 31425 j1 := s1.AuxInt 31426 x1 := s1.Args[0] 31427 if x1.Op != OpAMD64MOVBloadidx1 { 31428 break 31429 } 31430 i1 := x1.AuxInt 31431 s := x1.Aux 31432 _ = x1.Args[2] 31433 p := x1.Args[0] 31434 idx := x1.Args[1] 31435 mem := 
x1.Args[2] 31436 or := v.Args[1] 31437 if or.Op != OpAMD64ORL { 31438 break 31439 } 31440 _ = or.Args[1] 31441 y := or.Args[0] 31442 s0 := or.Args[1] 31443 if s0.Op != OpAMD64SHLLconst { 31444 break 31445 } 31446 j0 := s0.AuxInt 31447 x0 := s0.Args[0] 31448 if x0.Op != OpAMD64MOVBloadidx1 { 31449 break 31450 } 31451 i0 := x0.AuxInt 31452 if x0.Aux != s { 31453 break 31454 } 31455 _ = x0.Args[2] 31456 if p != x0.Args[0] { 31457 break 31458 } 31459 if idx != x0.Args[1] { 31460 break 31461 } 31462 if mem != x0.Args[2] { 31463 break 31464 } 31465 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31466 break 31467 } 31468 b = mergePoint(b, x0, x1) 31469 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31470 v.reset(OpCopy) 31471 v.AddArg(v0) 31472 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31473 v1.AuxInt = j0 31474 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31475 v2.AuxInt = i0 31476 v2.Aux = s 31477 v2.AddArg(p) 31478 v2.AddArg(idx) 31479 v2.AddArg(mem) 31480 v1.AddArg(v2) 31481 v0.AddArg(v1) 31482 v0.AddArg(y) 31483 return true 31484 } 31485 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 31486 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31487 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31488 for { 31489 _ = v.Args[1] 31490 s1 := v.Args[0] 31491 if s1.Op != OpAMD64SHLLconst { 31492 break 31493 } 31494 j1 := s1.AuxInt 31495 x1 := s1.Args[0] 31496 if x1.Op != OpAMD64MOVBloadidx1 { 31497 break 31498 } 31499 i1 := x1.AuxInt 31500 s := x1.Aux 31501 _ = x1.Args[2] 31502 idx := x1.Args[0] 31503 p := x1.Args[1] 31504 mem := x1.Args[2] 31505 or := v.Args[1] 31506 if or.Op != OpAMD64ORL { 31507 break 31508 } 31509 _ = or.Args[1] 31510 y := or.Args[0] 31511 s0 := or.Args[1] 31512 if s0.Op != OpAMD64SHLLconst { 31513 break 31514 } 31515 j0 := s0.AuxInt 31516 x0 := s0.Args[0] 31517 if x0.Op != OpAMD64MOVBloadidx1 { 31518 break 31519 } 31520 i0 := x0.AuxInt 31521 if x0.Aux != s { 31522 break 31523 } 31524 _ = x0.Args[2] 31525 if p != x0.Args[0] { 31526 break 31527 } 31528 if idx != x0.Args[1] { 31529 break 31530 } 31531 if mem != x0.Args[2] { 31532 break 31533 } 31534 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31535 break 31536 } 31537 b = mergePoint(b, x0, x1) 31538 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31539 v.reset(OpCopy) 31540 v.AddArg(v0) 31541 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31542 v1.AuxInt = j0 31543 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31544 v2.AuxInt = i0 31545 v2.Aux = s 31546 v2.AddArg(p) 31547 v2.AddArg(idx) 31548 v2.AddArg(mem) 31549 v1.AddArg(v2) 31550 v0.AddArg(v1) 31551 v0.AddArg(y) 31552 return true 31553 } 31554 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 31555 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && 
x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31556 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31557 for { 31558 _ = v.Args[1] 31559 s1 := v.Args[0] 31560 if s1.Op != OpAMD64SHLLconst { 31561 break 31562 } 31563 j1 := s1.AuxInt 31564 x1 := s1.Args[0] 31565 if x1.Op != OpAMD64MOVBloadidx1 { 31566 break 31567 } 31568 i1 := x1.AuxInt 31569 s := x1.Aux 31570 _ = x1.Args[2] 31571 p := x1.Args[0] 31572 idx := x1.Args[1] 31573 mem := x1.Args[2] 31574 or := v.Args[1] 31575 if or.Op != OpAMD64ORL { 31576 break 31577 } 31578 _ = or.Args[1] 31579 y := or.Args[0] 31580 s0 := or.Args[1] 31581 if s0.Op != OpAMD64SHLLconst { 31582 break 31583 } 31584 j0 := s0.AuxInt 31585 x0 := s0.Args[0] 31586 if x0.Op != OpAMD64MOVBloadidx1 { 31587 break 31588 } 31589 i0 := x0.AuxInt 31590 if x0.Aux != s { 31591 break 31592 } 31593 _ = x0.Args[2] 31594 if idx != x0.Args[0] { 31595 break 31596 } 31597 if p != x0.Args[1] { 31598 break 31599 } 31600 if mem != x0.Args[2] { 31601 break 31602 } 31603 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31604 break 31605 } 31606 b = mergePoint(b, x0, x1) 31607 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31608 v.reset(OpCopy) 31609 v.AddArg(v0) 31610 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31611 v1.AuxInt = j0 31612 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31613 v2.AuxInt = i0 31614 v2.Aux = s 31615 v2.AddArg(p) 31616 v2.AddArg(idx) 31617 v2.AddArg(mem) 31618 v1.AddArg(v2) 31619 v0.AddArg(v1) 31620 v0.AddArg(y) 31621 return true 31622 } 31623 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 31624 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31625 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31626 for { 31627 _ = v.Args[1] 31628 s1 := v.Args[0] 31629 if s1.Op != OpAMD64SHLLconst { 31630 break 31631 } 31632 j1 := s1.AuxInt 31633 x1 := s1.Args[0] 31634 if x1.Op != OpAMD64MOVBloadidx1 { 31635 break 31636 } 31637 i1 := x1.AuxInt 31638 s := x1.Aux 31639 _ = x1.Args[2] 31640 idx := x1.Args[0] 31641 p := x1.Args[1] 31642 mem := x1.Args[2] 31643 or := v.Args[1] 31644 if or.Op != OpAMD64ORL { 31645 break 31646 } 31647 _ = or.Args[1] 31648 y := or.Args[0] 31649 s0 := or.Args[1] 31650 if s0.Op != OpAMD64SHLLconst { 31651 break 31652 } 31653 j0 := s0.AuxInt 31654 x0 := s0.Args[0] 31655 if x0.Op != OpAMD64MOVBloadidx1 { 31656 break 31657 } 31658 i0 := x0.AuxInt 31659 if x0.Aux != s { 31660 break 31661 } 31662 _ = x0.Args[2] 31663 if idx != x0.Args[0] { 31664 break 31665 } 31666 if p != x0.Args[1] { 31667 break 31668 } 31669 if mem != x0.Args[2] { 31670 break 31671 } 31672 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31673 break 31674 } 31675 b = mergePoint(b, 
x0, x1) 31676 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31677 v.reset(OpCopy) 31678 v.AddArg(v0) 31679 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31680 v1.AuxInt = j0 31681 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31682 v2.AuxInt = i0 31683 v2.Aux = s 31684 v2.AddArg(p) 31685 v2.AddArg(idx) 31686 v2.AddArg(mem) 31687 v1.AddArg(v2) 31688 v0.AddArg(v1) 31689 v0.AddArg(y) 31690 return true 31691 } 31692 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 31693 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31694 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31695 for { 31696 _ = v.Args[1] 31697 or := v.Args[0] 31698 if or.Op != OpAMD64ORL { 31699 break 31700 } 31701 _ = or.Args[1] 31702 s0 := or.Args[0] 31703 if s0.Op != OpAMD64SHLLconst { 31704 break 31705 } 31706 j0 := s0.AuxInt 31707 x0 := s0.Args[0] 31708 if x0.Op != OpAMD64MOVBloadidx1 { 31709 break 31710 } 31711 i0 := x0.AuxInt 31712 s := x0.Aux 31713 _ = x0.Args[2] 31714 p := x0.Args[0] 31715 idx := x0.Args[1] 31716 mem := x0.Args[2] 31717 y := or.Args[1] 31718 s1 := v.Args[1] 31719 if s1.Op != OpAMD64SHLLconst { 31720 break 31721 } 31722 j1 := s1.AuxInt 31723 x1 := s1.Args[0] 31724 if x1.Op != OpAMD64MOVBloadidx1 { 31725 break 31726 } 31727 i1 := x1.AuxInt 31728 if x1.Aux != s { 31729 break 31730 } 31731 _ = x1.Args[2] 31732 if p != x1.Args[0] { 31733 break 31734 } 31735 if idx != x1.Args[1] { 31736 break 31737 } 31738 if mem != x1.Args[2] { 31739 break 31740 } 31741 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31742 break 31743 } 31744 b = mergePoint(b, x0, x1) 31745 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31746 v.reset(OpCopy) 31747 v.AddArg(v0) 31748 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31749 v1.AuxInt = j0 31750 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31751 v2.AuxInt = i0 31752 v2.Aux = s 31753 v2.AddArg(p) 31754 v2.AddArg(idx) 31755 v2.AddArg(mem) 31756 v1.AddArg(v2) 31757 v0.AddArg(v1) 31758 v0.AddArg(y) 31759 return true 31760 } 31761 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 31762 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31763 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31764 for { 31765 _ = v.Args[1] 31766 or := v.Args[0] 31767 if or.Op != OpAMD64ORL { 31768 break 31769 } 31770 _ = or.Args[1] 31771 s0 := or.Args[0] 31772 if s0.Op != OpAMD64SHLLconst { 31773 break 31774 } 31775 j0 := s0.AuxInt 31776 x0 := s0.Args[0] 31777 if x0.Op != OpAMD64MOVBloadidx1 { 31778 break 31779 } 31780 i0 := x0.AuxInt 31781 s := x0.Aux 31782 _ = x0.Args[2] 31783 idx := x0.Args[0] 31784 p := x0.Args[1] 31785 mem := x0.Args[2] 31786 y := or.Args[1] 31787 s1 := v.Args[1] 31788 if s1.Op != OpAMD64SHLLconst { 31789 
break 31790 } 31791 j1 := s1.AuxInt 31792 x1 := s1.Args[0] 31793 if x1.Op != OpAMD64MOVBloadidx1 { 31794 break 31795 } 31796 i1 := x1.AuxInt 31797 if x1.Aux != s { 31798 break 31799 } 31800 _ = x1.Args[2] 31801 if p != x1.Args[0] { 31802 break 31803 } 31804 if idx != x1.Args[1] { 31805 break 31806 } 31807 if mem != x1.Args[2] { 31808 break 31809 } 31810 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31811 break 31812 } 31813 b = mergePoint(b, x0, x1) 31814 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31815 v.reset(OpCopy) 31816 v.AddArg(v0) 31817 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31818 v1.AuxInt = j0 31819 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31820 v2.AuxInt = i0 31821 v2.Aux = s 31822 v2.AddArg(p) 31823 v2.AddArg(idx) 31824 v2.AddArg(mem) 31825 v1.AddArg(v2) 31826 v0.AddArg(v1) 31827 v0.AddArg(y) 31828 return true 31829 } 31830 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 31831 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31832 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31833 for { 31834 _ = v.Args[1] 31835 or := v.Args[0] 31836 if or.Op != OpAMD64ORL { 31837 break 31838 } 31839 _ = or.Args[1] 31840 y := or.Args[0] 31841 s0 := or.Args[1] 31842 if s0.Op != OpAMD64SHLLconst { 31843 break 31844 } 31845 j0 := s0.AuxInt 31846 x0 := s0.Args[0] 31847 if x0.Op != OpAMD64MOVBloadidx1 { 31848 break 31849 } 31850 i0 := x0.AuxInt 31851 s := x0.Aux 31852 _ = x0.Args[2] 31853 p := x0.Args[0] 31854 idx := x0.Args[1] 31855 mem := x0.Args[2] 31856 s1 := v.Args[1] 31857 if s1.Op != OpAMD64SHLLconst { 31858 break 31859 } 31860 j1 := s1.AuxInt 31861 x1 := s1.Args[0] 31862 if x1.Op != OpAMD64MOVBloadidx1 { 31863 break 31864 } 31865 i1 := x1.AuxInt 31866 if x1.Aux != s { 31867 break 31868 } 31869 _ = x1.Args[2] 31870 if p != x1.Args[0] { 31871 break 31872 } 31873 if idx != x1.Args[1] { 31874 break 31875 } 31876 if mem != x1.Args[2] { 31877 break 31878 } 31879 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31880 break 31881 } 31882 b = mergePoint(b, x0, x1) 31883 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31884 v.reset(OpCopy) 31885 v.AddArg(v0) 31886 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31887 v1.AuxInt = j0 31888 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31889 v2.AuxInt = i0 31890 v2.Aux = s 31891 v2.AddArg(p) 31892 v2.AddArg(idx) 31893 v2.AddArg(mem) 31894 v1.AddArg(v2) 31895 v0.AddArg(v1) 31896 v0.AddArg(y) 31897 return true 31898 } 31899 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 31900 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31901 // 
result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31902 for { 31903 _ = v.Args[1] 31904 or := v.Args[0] 31905 if or.Op != OpAMD64ORL { 31906 break 31907 } 31908 _ = or.Args[1] 31909 y := or.Args[0] 31910 s0 := or.Args[1] 31911 if s0.Op != OpAMD64SHLLconst { 31912 break 31913 } 31914 j0 := s0.AuxInt 31915 x0 := s0.Args[0] 31916 if x0.Op != OpAMD64MOVBloadidx1 { 31917 break 31918 } 31919 i0 := x0.AuxInt 31920 s := x0.Aux 31921 _ = x0.Args[2] 31922 idx := x0.Args[0] 31923 p := x0.Args[1] 31924 mem := x0.Args[2] 31925 s1 := v.Args[1] 31926 if s1.Op != OpAMD64SHLLconst { 31927 break 31928 } 31929 j1 := s1.AuxInt 31930 x1 := s1.Args[0] 31931 if x1.Op != OpAMD64MOVBloadidx1 { 31932 break 31933 } 31934 i1 := x1.AuxInt 31935 if x1.Aux != s { 31936 break 31937 } 31938 _ = x1.Args[2] 31939 if p != x1.Args[0] { 31940 break 31941 } 31942 if idx != x1.Args[1] { 31943 break 31944 } 31945 if mem != x1.Args[2] { 31946 break 31947 } 31948 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31949 break 31950 } 31951 b = mergePoint(b, x0, x1) 31952 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 31953 v.reset(OpCopy) 31954 v.AddArg(v0) 31955 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 31956 v1.AuxInt = j0 31957 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31958 v2.AuxInt = i0 31959 v2.Aux = s 31960 v2.AddArg(p) 31961 v2.AddArg(idx) 31962 v2.AddArg(mem) 31963 v1.AddArg(v2) 31964 v0.AddArg(v1) 31965 v0.AddArg(y) 31966 return true 31967 } 31968 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 31969 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31970 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 31971 for { 31972 _ = v.Args[1] 31973 or := v.Args[0] 31974 if or.Op != OpAMD64ORL { 31975 break 31976 } 31977 _ = or.Args[1] 31978 s0 := or.Args[0] 31979 if s0.Op != OpAMD64SHLLconst { 31980 break 31981 } 31982 j0 := s0.AuxInt 31983 x0 := s0.Args[0] 31984 if x0.Op != OpAMD64MOVBloadidx1 { 31985 break 31986 } 31987 i0 := x0.AuxInt 31988 s := x0.Aux 31989 _ = x0.Args[2] 31990 p := x0.Args[0] 31991 idx := x0.Args[1] 31992 mem := x0.Args[2] 31993 y := or.Args[1] 31994 s1 := v.Args[1] 31995 if s1.Op != OpAMD64SHLLconst { 31996 break 31997 } 31998 j1 := s1.AuxInt 31999 x1 := s1.Args[0] 32000 if x1.Op != OpAMD64MOVBloadidx1 { 32001 break 32002 } 32003 i1 := x1.AuxInt 32004 if x1.Aux != s { 32005 break 32006 } 32007 _ = x1.Args[2] 32008 if idx != x1.Args[0] { 32009 break 32010 } 32011 if p != x1.Args[1] { 32012 break 32013 } 32014 if mem != x1.Args[2] { 32015 break 32016 } 32017 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32018 break 32019 } 32020 b = mergePoint(b, x0, x1) 32021 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 32022 v.reset(OpCopy) 32023 v.AddArg(v0) 32024 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 32025 v1.AuxInt = j0 32026 v2 := 
b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32027 v2.AuxInt = i0 32028 v2.Aux = s 32029 v2.AddArg(p) 32030 v2.AddArg(idx) 32031 v2.AddArg(mem) 32032 v1.AddArg(v2) 32033 v0.AddArg(v1) 32034 v0.AddArg(y) 32035 return true 32036 } 32037 return false 32038 } 32039 func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { 32040 b := v.Block 32041 _ = b 32042 typ := &b.Func.Config.Types 32043 _ = typ 32044 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 32045 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32046 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 32047 for { 32048 _ = v.Args[1] 32049 or := v.Args[0] 32050 if or.Op != OpAMD64ORL { 32051 break 32052 } 32053 _ = or.Args[1] 32054 s0 := or.Args[0] 32055 if s0.Op != OpAMD64SHLLconst { 32056 break 32057 } 32058 j0 := s0.AuxInt 32059 x0 := s0.Args[0] 32060 if x0.Op != OpAMD64MOVBloadidx1 { 32061 break 32062 } 32063 i0 := x0.AuxInt 32064 s := x0.Aux 32065 _ = x0.Args[2] 32066 idx := x0.Args[0] 32067 p := x0.Args[1] 32068 mem := x0.Args[2] 32069 y := or.Args[1] 32070 s1 := v.Args[1] 32071 if s1.Op != OpAMD64SHLLconst { 32072 break 32073 } 32074 j1 := s1.AuxInt 32075 x1 := s1.Args[0] 32076 if x1.Op != OpAMD64MOVBloadidx1 { 32077 break 32078 } 32079 i1 := x1.AuxInt 32080 if x1.Aux != s { 32081 break 32082 } 32083 _ = x1.Args[2] 32084 if idx != x1.Args[0] { 32085 break 32086 } 32087 if p != x1.Args[1] { 32088 break 32089 } 32090 if mem != x1.Args[2] { 32091 break 32092 } 32093 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32094 break 32095 } 32096 b = mergePoint(b, x0, x1) 32097 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 32098 v.reset(OpCopy) 32099 v.AddArg(v0) 32100 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 32101 v1.AuxInt = j0 32102 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32103 v2.AuxInt = i0 32104 v2.Aux = s 32105 v2.AddArg(p) 32106 v2.AddArg(idx) 32107 v2.AddArg(mem) 32108 v1.AddArg(v2) 32109 v0.AddArg(v1) 32110 v0.AddArg(y) 32111 return true 32112 } 32113 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 32114 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32115 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 32116 for { 32117 _ = v.Args[1] 32118 or := v.Args[0] 32119 if or.Op != OpAMD64ORL { 32120 break 32121 } 32122 _ = or.Args[1] 32123 y := or.Args[0] 32124 s0 := or.Args[1] 32125 if s0.Op != OpAMD64SHLLconst { 32126 break 32127 } 32128 j0 := s0.AuxInt 32129 x0 := s0.Args[0] 32130 if x0.Op != OpAMD64MOVBloadidx1 { 32131 break 32132 } 32133 i0 := x0.AuxInt 32134 s := x0.Aux 32135 _ = x0.Args[2] 32136 p := x0.Args[0] 32137 idx := x0.Args[1] 32138 mem := x0.Args[2] 32139 s1 := v.Args[1] 32140 if s1.Op != OpAMD64SHLLconst { 32141 break 32142 } 32143 j1 := 
s1.AuxInt 32144 x1 := s1.Args[0] 32145 if x1.Op != OpAMD64MOVBloadidx1 { 32146 break 32147 } 32148 i1 := x1.AuxInt 32149 if x1.Aux != s { 32150 break 32151 } 32152 _ = x1.Args[2] 32153 if idx != x1.Args[0] { 32154 break 32155 } 32156 if p != x1.Args[1] { 32157 break 32158 } 32159 if mem != x1.Args[2] { 32160 break 32161 } 32162 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32163 break 32164 } 32165 b = mergePoint(b, x0, x1) 32166 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 32167 v.reset(OpCopy) 32168 v.AddArg(v0) 32169 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 32170 v1.AuxInt = j0 32171 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32172 v2.AuxInt = i0 32173 v2.Aux = s 32174 v2.AddArg(p) 32175 v2.AddArg(idx) 32176 v2.AddArg(mem) 32177 v1.AddArg(v2) 32178 v0.AddArg(v1) 32179 v0.AddArg(y) 32180 return true 32181 } 32182 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 32183 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32184 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 32185 for { 32186 _ = v.Args[1] 32187 or := v.Args[0] 32188 if or.Op != OpAMD64ORL { 32189 break 32190 } 32191 _ = or.Args[1] 32192 y := or.Args[0] 32193 s0 := or.Args[1] 32194 if s0.Op != OpAMD64SHLLconst { 32195 break 32196 } 32197 j0 := s0.AuxInt 32198 x0 := s0.Args[0] 32199 if x0.Op != OpAMD64MOVBloadidx1 { 32200 break 32201 } 32202 i0 := x0.AuxInt 32203 s := x0.Aux 32204 _ = x0.Args[2] 32205 idx := x0.Args[0] 32206 p := x0.Args[1] 32207 mem := x0.Args[2] 32208 s1 := v.Args[1] 32209 if s1.Op != OpAMD64SHLLconst { 32210 break 32211 } 32212 j1 := s1.AuxInt 32213 x1 := s1.Args[0] 32214 if x1.Op != OpAMD64MOVBloadidx1 { 32215 break 32216 } 32217 i1 := x1.AuxInt 32218 if x1.Aux != s { 32219 break 32220 } 32221 _ = x1.Args[2] 32222 if idx != x1.Args[0] { 32223 break 32224 } 32225 if p != x1.Args[1] { 32226 break 32227 } 32228 if mem != x1.Args[2] { 32229 break 32230 } 32231 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32232 break 32233 } 32234 b = mergePoint(b, x0, x1) 32235 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 32236 v.reset(OpCopy) 32237 v.AddArg(v0) 32238 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 32239 v1.AuxInt = j0 32240 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32241 v2.AuxInt = i0 32242 v2.Aux = s 32243 v2.AddArg(p) 32244 v2.AddArg(idx) 32245 v2.AddArg(mem) 32246 v1.AddArg(v2) 32247 v0.AddArg(v1) 32248 v0.AddArg(y) 32249 return true 32250 } 32251 // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) 32252 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 32253 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 32254 for { 32255 _ = v.Args[1] 32256 x1 := v.Args[0] 32257 if x1.Op != OpAMD64MOVBload { 
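// NOTE (annotation, not generated code). The rule being matched here (and its mirrored
// operand order just below) handles big-endian byte order: the lower-addressed byte is the
// one shifted left by 8, so the pair is a big-endian 16-bit read. It still becomes a single
// MOVWload, now followed by ROLW $8, which swaps the two bytes of a 16-bit value. Sketch
// (le16 as in the first note; bits is math/bits):
//
//	func be16(b []byte, i int) uint16 {
//		// uint16(b[i])<<8 | uint16(b[i+1]) == bits.RotateLeft16(le16(b, i), 8)
//		return bits.RotateLeft16(le16(b, i), 8) // MOVWload + ROLWconst [8]
//	}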
32258 break 32259 } 32260 i1 := x1.AuxInt 32261 s := x1.Aux 32262 _ = x1.Args[1] 32263 p := x1.Args[0] 32264 mem := x1.Args[1] 32265 sh := v.Args[1] 32266 if sh.Op != OpAMD64SHLLconst { 32267 break 32268 } 32269 if sh.AuxInt != 8 { 32270 break 32271 } 32272 x0 := sh.Args[0] 32273 if x0.Op != OpAMD64MOVBload { 32274 break 32275 } 32276 i0 := x0.AuxInt 32277 if x0.Aux != s { 32278 break 32279 } 32280 _ = x0.Args[1] 32281 if p != x0.Args[0] { 32282 break 32283 } 32284 if mem != x0.Args[1] { 32285 break 32286 } 32287 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 32288 break 32289 } 32290 b = mergePoint(b, x0, x1) 32291 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 32292 v.reset(OpCopy) 32293 v.AddArg(v0) 32294 v0.AuxInt = 8 32295 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 32296 v1.AuxInt = i0 32297 v1.Aux = s 32298 v1.AddArg(p) 32299 v1.AddArg(mem) 32300 v0.AddArg(v1) 32301 return true 32302 } 32303 // match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 32304 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 32305 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 32306 for { 32307 _ = v.Args[1] 32308 sh := v.Args[0] 32309 if sh.Op != OpAMD64SHLLconst { 32310 break 32311 } 32312 if sh.AuxInt != 8 { 32313 break 32314 } 32315 x0 := sh.Args[0] 32316 if x0.Op != OpAMD64MOVBload { 32317 break 32318 } 32319 i0 := x0.AuxInt 32320 s := x0.Aux 32321 _ = x0.Args[1] 32322 p := x0.Args[0] 32323 mem := x0.Args[1] 32324 x1 := v.Args[1] 32325 if x1.Op != OpAMD64MOVBload { 32326 break 32327 } 32328 i1 := x1.AuxInt 32329 if x1.Aux != s { 32330 break 32331 } 32332 _ = x1.Args[1] 32333 if p != x1.Args[0] { 32334 break 32335 } 32336 if mem != x1.Args[1] { 32337 break 32338 } 32339 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 32340 break 32341 } 32342 b = mergePoint(b, x0, x1) 32343 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 32344 v.reset(OpCopy) 32345 v.AddArg(v0) 32346 v0.AuxInt = 8 32347 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 32348 v1.AuxInt = i0 32349 v1.Aux = s 32350 v1.AddArg(p) 32351 v1.AddArg(mem) 32352 v0.AddArg(v1) 32353 return true 32354 } 32355 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 32356 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 32357 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 32358 for { 32359 _ = v.Args[1] 32360 r1 := v.Args[0] 32361 if r1.Op != OpAMD64ROLWconst { 32362 break 32363 } 32364 if r1.AuxInt != 8 { 32365 break 32366 } 32367 x1 := r1.Args[0] 32368 if x1.Op != OpAMD64MOVWload { 32369 break 32370 } 32371 i1 := x1.AuxInt 32372 s := x1.Aux 32373 _ = x1.Args[1] 32374 p := x1.Args[0] 32375 mem := x1.Args[1] 32376 sh := v.Args[1] 32377 if sh.Op != OpAMD64SHLLconst { 32378 break 32379 } 32380 if sh.AuxInt != 16 { 32381 break 32382 } 32383 r0 := sh.Args[0] 32384 if r0.Op != OpAMD64ROLWconst { 32385 break 32386 } 32387 if r0.AuxInt != 8 { 32388 break 32389 } 32390 x0 := r0.Args[0] 32391 if x0.Op != OpAMD64MOVWload { 32392 
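// NOTE (annotation, not generated code). Matched around this point: two byte-swapped
// 16-bit loads (ROLWconst [8] of a MOVWload at i0 and at i0+2, the low-address one shifted
// left by 16) form a big-endian 32-bit read, so the pair becomes one MOVLload followed by
// a single BSWAPL. Sketch, assuming the hypothetical le32/be16 helpers from the earlier
// notes:
//
//	func be32(b []byte, i int) uint32 {
//		// uint32(be16(b, i))<<16 | uint32(be16(b, i+2)) == bits.ReverseBytes32(le32(b, i))
//		return bits.ReverseBytes32(le32(b, i)) // MOVLload + BSWAPL
//	}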
break 32393 } 32394 i0 := x0.AuxInt 32395 if x0.Aux != s { 32396 break 32397 } 32398 _ = x0.Args[1] 32399 if p != x0.Args[0] { 32400 break 32401 } 32402 if mem != x0.Args[1] { 32403 break 32404 } 32405 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 32406 break 32407 } 32408 b = mergePoint(b, x0, x1) 32409 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 32410 v.reset(OpCopy) 32411 v.AddArg(v0) 32412 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 32413 v1.AuxInt = i0 32414 v1.Aux = s 32415 v1.AddArg(p) 32416 v1.AddArg(mem) 32417 v0.AddArg(v1) 32418 return true 32419 } 32420 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 32421 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 32422 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 32423 for { 32424 _ = v.Args[1] 32425 sh := v.Args[0] 32426 if sh.Op != OpAMD64SHLLconst { 32427 break 32428 } 32429 if sh.AuxInt != 16 { 32430 break 32431 } 32432 r0 := sh.Args[0] 32433 if r0.Op != OpAMD64ROLWconst { 32434 break 32435 } 32436 if r0.AuxInt != 8 { 32437 break 32438 } 32439 x0 := r0.Args[0] 32440 if x0.Op != OpAMD64MOVWload { 32441 break 32442 } 32443 i0 := x0.AuxInt 32444 s := x0.Aux 32445 _ = x0.Args[1] 32446 p := x0.Args[0] 32447 mem := x0.Args[1] 32448 r1 := v.Args[1] 32449 if r1.Op != OpAMD64ROLWconst { 32450 break 32451 } 32452 if r1.AuxInt != 8 { 32453 break 32454 } 32455 x1 := r1.Args[0] 32456 if x1.Op != OpAMD64MOVWload { 32457 break 32458 } 32459 i1 := x1.AuxInt 32460 if x1.Aux != s { 32461 break 32462 } 32463 _ = x1.Args[1] 32464 if p != x1.Args[0] { 32465 break 32466 } 32467 if mem != x1.Args[1] { 32468 break 32469 } 32470 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 32471 break 32472 } 32473 b = mergePoint(b, x0, x1) 32474 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 32475 v.reset(OpCopy) 32476 v.AddArg(v0) 32477 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 32478 v1.AuxInt = i0 32479 v1.Aux = s 32480 v1.AddArg(p) 32481 v1.AddArg(mem) 32482 v0.AddArg(v1) 32483 return true 32484 } 32485 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 32486 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32487 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 32488 for { 32489 _ = v.Args[1] 32490 s0 := v.Args[0] 32491 if s0.Op != OpAMD64SHLLconst { 32492 break 32493 } 32494 j0 := s0.AuxInt 32495 x0 := s0.Args[0] 32496 if x0.Op != OpAMD64MOVBload { 32497 break 32498 } 32499 i0 := x0.AuxInt 32500 s := x0.Aux 32501 _ = x0.Args[1] 32502 p := x0.Args[0] 32503 mem := x0.Args[1] 32504 or := v.Args[1] 32505 if or.Op != OpAMD64ORL { 32506 break 32507 } 32508 _ = or.Args[1] 32509 s1 := or.Args[0] 32510 if s1.Op != OpAMD64SHLLconst { 
32511 break 32512 } 32513 j1 := s1.AuxInt 32514 x1 := s1.Args[0] 32515 if x1.Op != OpAMD64MOVBload { 32516 break 32517 } 32518 i1 := x1.AuxInt 32519 if x1.Aux != s { 32520 break 32521 } 32522 _ = x1.Args[1] 32523 if p != x1.Args[0] { 32524 break 32525 } 32526 if mem != x1.Args[1] { 32527 break 32528 } 32529 y := or.Args[1] 32530 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32531 break 32532 } 32533 b = mergePoint(b, x0, x1) 32534 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 32535 v.reset(OpCopy) 32536 v.AddArg(v0) 32537 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 32538 v1.AuxInt = j1 32539 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32540 v2.AuxInt = 8 32541 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 32542 v3.AuxInt = i0 32543 v3.Aux = s 32544 v3.AddArg(p) 32545 v3.AddArg(mem) 32546 v2.AddArg(v3) 32547 v1.AddArg(v2) 32548 v0.AddArg(v1) 32549 v0.AddArg(y) 32550 return true 32551 } 32552 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))) 32553 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32554 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 32555 for { 32556 _ = v.Args[1] 32557 s0 := v.Args[0] 32558 if s0.Op != OpAMD64SHLLconst { 32559 break 32560 } 32561 j0 := s0.AuxInt 32562 x0 := s0.Args[0] 32563 if x0.Op != OpAMD64MOVBload { 32564 break 32565 } 32566 i0 := x0.AuxInt 32567 s := x0.Aux 32568 _ = x0.Args[1] 32569 p := x0.Args[0] 32570 mem := x0.Args[1] 32571 or := v.Args[1] 32572 if or.Op != OpAMD64ORL { 32573 break 32574 } 32575 _ = or.Args[1] 32576 y := or.Args[0] 32577 s1 := or.Args[1] 32578 if s1.Op != OpAMD64SHLLconst { 32579 break 32580 } 32581 j1 := s1.AuxInt 32582 x1 := s1.Args[0] 32583 if x1.Op != OpAMD64MOVBload { 32584 break 32585 } 32586 i1 := x1.AuxInt 32587 if x1.Aux != s { 32588 break 32589 } 32590 _ = x1.Args[1] 32591 if p != x1.Args[0] { 32592 break 32593 } 32594 if mem != x1.Args[1] { 32595 break 32596 } 32597 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32598 break 32599 } 32600 b = mergePoint(b, x0, x1) 32601 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 32602 v.reset(OpCopy) 32603 v.AddArg(v0) 32604 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 32605 v1.AuxInt = j1 32606 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32607 v2.AuxInt = 8 32608 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 32609 v3.AuxInt = i0 32610 v3.Aux = s 32611 v3.AddArg(p) 32612 v3.AddArg(mem) 32613 v2.AddArg(v3) 32614 v1.AddArg(v2) 32615 v0.AddArg(v1) 32616 v0.AddArg(y) 32617 return true 32618 } 32619 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 32620 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && 
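// rewriteValueAMD64_OpAMD64ORL_100 continues the same ORL rule list; the
// generator splits each opcode's rules into fixed-size helper functions,
// and each helper returns false when none of its rules fire so the caller
// can try the next chunk. The rules in this chunk repeat the two-byte
// merge for indexed loads (MOVBloadidx1): because the address p+idx is
// symmetric in p and idx, every rule is emitted once per operand order.
// Illustrative only, not generated output, the shape being matched is
// roughly:
//
//	func load16BEIdx(b []byte, i int) uint16 { // hypothetical example
//		return uint16(b[i+1]) | uint16(b[i])<<8
//	}
//
// which fuses into a ROLWconst [8] of a single MOVWloadidx1.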
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p, mem := x1.Args[0], x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
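// rewriteValueAMD64_OpAMD64ORL_110 handles the four-byte analogue for
// indexed loads: two byte-swapped 16-bit halves, the high half shifted
// left by 16, collapse into one 32-bit load plus a single BSWAPL.
// Illustrative only, not generated output:
//
//	func load32BEIdx(b []byte, i int) uint32 { // hypothetical example
//		hi := uint16(b[i+1]) | uint16(b[i])<<8   // ROLWconst [8] (MOVWloadidx1 [i])
//		lo := uint16(b[i+3]) | uint16(b[i+2])<<8 // ROLWconst [8] (MOVWloadidx1 [i+2])
//		return uint32(lo) | uint32(hi)<<16
//	}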
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s || idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
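	// The remaining rules in this chunk merge a byte pair that feeds a
	// larger OR tree: two SHLLconst-shifted byte loads whose shifts differ
	// by 8 (j1 == j0-8, with j1 aligned to 16) are one byte-swapped 16-bit
	// load shifted by j1. Illustrative only, not generated output:
	//
	//	func or32BEPair(b []byte, i int, y uint32) uint32 { // hypothetical example
	//		return y | uint32(b[i])<<24 | uint32(b[i+1])<<16 // j0 = 24, j1 = 16
	//	}
	//
	// The two shifted byte loads become (SHLLconst [16] (ROLWconst [8]
	// (MOVWloadidx1 [i]))), leaving y untouched.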
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s || idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
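// rewriteValueAMD64_OpAMD64ORL_120 finishes the same byte-pair family:
// with a commutative outer ORL, a commutative inner ORL, and two operand
// orders for each MOVBloadidx1 address, one logical rule expands to up to
// 2*2*2*2 = 16 generated matches, which is why the variants spill across
// chunk boundaries. A minimal sketch of how the chunks chain (names are
// illustrative, not the generated ones):
//
//	func rewriteORL(v *Value) bool {
//		return orlChunkA(v) || orlChunkB(v) || orlChunkC(v) // first match wins
//	}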
33996 v0.AddArg(y) 33997 return true 33998 } 33999 return false 34000 } 34001 func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool { 34002 b := v.Block 34003 _ = b 34004 typ := &b.Func.Config.Types 34005 _ = typ 34006 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 34007 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34008 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34009 for { 34010 _ = v.Args[1] 34011 s0 := v.Args[0] 34012 if s0.Op != OpAMD64SHLLconst { 34013 break 34014 } 34015 j0 := s0.AuxInt 34016 x0 := s0.Args[0] 34017 if x0.Op != OpAMD64MOVBloadidx1 { 34018 break 34019 } 34020 i0 := x0.AuxInt 34021 s := x0.Aux 34022 _ = x0.Args[2] 34023 idx := x0.Args[0] 34024 p := x0.Args[1] 34025 mem := x0.Args[2] 34026 or := v.Args[1] 34027 if or.Op != OpAMD64ORL { 34028 break 34029 } 34030 _ = or.Args[1] 34031 s1 := or.Args[0] 34032 if s1.Op != OpAMD64SHLLconst { 34033 break 34034 } 34035 j1 := s1.AuxInt 34036 x1 := s1.Args[0] 34037 if x1.Op != OpAMD64MOVBloadidx1 { 34038 break 34039 } 34040 i1 := x1.AuxInt 34041 if x1.Aux != s { 34042 break 34043 } 34044 _ = x1.Args[2] 34045 if idx != x1.Args[0] { 34046 break 34047 } 34048 if p != x1.Args[1] { 34049 break 34050 } 34051 if mem != x1.Args[2] { 34052 break 34053 } 34054 y := or.Args[1] 34055 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34056 break 34057 } 34058 b = mergePoint(b, x0, x1) 34059 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34060 v.reset(OpCopy) 34061 v.AddArg(v0) 34062 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34063 v1.AuxInt = j1 34064 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34065 v2.AuxInt = 8 34066 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34067 v3.AuxInt = i0 34068 v3.Aux = s 34069 v3.AddArg(p) 34070 v3.AddArg(idx) 34071 v3.AddArg(mem) 34072 v2.AddArg(v3) 34073 v1.AddArg(v2) 34074 v0.AddArg(v1) 34075 v0.AddArg(y) 34076 return true 34077 } 34078 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 34079 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34080 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34081 for { 34082 _ = v.Args[1] 34083 s0 := v.Args[0] 34084 if s0.Op != OpAMD64SHLLconst { 34085 break 34086 } 34087 j0 := s0.AuxInt 34088 x0 := s0.Args[0] 34089 if x0.Op != OpAMD64MOVBloadidx1 { 34090 break 34091 } 34092 i0 := x0.AuxInt 34093 s := x0.Aux 34094 _ = x0.Args[2] 34095 p := x0.Args[0] 34096 idx := x0.Args[1] 34097 mem := x0.Args[2] 34098 or := v.Args[1] 34099 if or.Op != OpAMD64ORL { 34100 break 34101 } 34102 _ = or.Args[1] 34103 y := or.Args[0] 34104 s1 := or.Args[1] 34105 if s1.Op != OpAMD64SHLLconst { 34106 break 34107 } 34108 j1 := s1.AuxInt 34109 x1 := s1.Args[0] 
34110 if x1.Op != OpAMD64MOVBloadidx1 { 34111 break 34112 } 34113 i1 := x1.AuxInt 34114 if x1.Aux != s { 34115 break 34116 } 34117 _ = x1.Args[2] 34118 if p != x1.Args[0] { 34119 break 34120 } 34121 if idx != x1.Args[1] { 34122 break 34123 } 34124 if mem != x1.Args[2] { 34125 break 34126 } 34127 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34128 break 34129 } 34130 b = mergePoint(b, x0, x1) 34131 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34132 v.reset(OpCopy) 34133 v.AddArg(v0) 34134 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34135 v1.AuxInt = j1 34136 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34137 v2.AuxInt = 8 34138 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34139 v3.AuxInt = i0 34140 v3.Aux = s 34141 v3.AddArg(p) 34142 v3.AddArg(idx) 34143 v3.AddArg(mem) 34144 v2.AddArg(v3) 34145 v1.AddArg(v2) 34146 v0.AddArg(v1) 34147 v0.AddArg(y) 34148 return true 34149 } 34150 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 34151 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34152 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34153 for { 34154 _ = v.Args[1] 34155 s0 := v.Args[0] 34156 if s0.Op != OpAMD64SHLLconst { 34157 break 34158 } 34159 j0 := s0.AuxInt 34160 x0 := s0.Args[0] 34161 if x0.Op != OpAMD64MOVBloadidx1 { 34162 break 34163 } 34164 i0 := x0.AuxInt 34165 s := x0.Aux 34166 _ = x0.Args[2] 34167 idx := x0.Args[0] 34168 p := x0.Args[1] 34169 mem := x0.Args[2] 34170 or := v.Args[1] 34171 if or.Op != OpAMD64ORL { 34172 break 34173 } 34174 _ = or.Args[1] 34175 y := or.Args[0] 34176 s1 := or.Args[1] 34177 if s1.Op != OpAMD64SHLLconst { 34178 break 34179 } 34180 j1 := s1.AuxInt 34181 x1 := s1.Args[0] 34182 if x1.Op != OpAMD64MOVBloadidx1 { 34183 break 34184 } 34185 i1 := x1.AuxInt 34186 if x1.Aux != s { 34187 break 34188 } 34189 _ = x1.Args[2] 34190 if p != x1.Args[0] { 34191 break 34192 } 34193 if idx != x1.Args[1] { 34194 break 34195 } 34196 if mem != x1.Args[2] { 34197 break 34198 } 34199 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34200 break 34201 } 34202 b = mergePoint(b, x0, x1) 34203 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34204 v.reset(OpCopy) 34205 v.AddArg(v0) 34206 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34207 v1.AuxInt = j1 34208 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34209 v2.AuxInt = 8 34210 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34211 v3.AuxInt = i0 34212 v3.Aux = s 34213 v3.AddArg(p) 34214 v3.AddArg(idx) 34215 v3.AddArg(mem) 34216 v2.AddArg(v3) 34217 v1.AddArg(v2) 34218 v0.AddArg(v1) 34219 v0.AddArg(y) 34220 return true 34221 } 34222 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 34223 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses 
== 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34224 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34225 for { 34226 _ = v.Args[1] 34227 s0 := v.Args[0] 34228 if s0.Op != OpAMD64SHLLconst { 34229 break 34230 } 34231 j0 := s0.AuxInt 34232 x0 := s0.Args[0] 34233 if x0.Op != OpAMD64MOVBloadidx1 { 34234 break 34235 } 34236 i0 := x0.AuxInt 34237 s := x0.Aux 34238 _ = x0.Args[2] 34239 p := x0.Args[0] 34240 idx := x0.Args[1] 34241 mem := x0.Args[2] 34242 or := v.Args[1] 34243 if or.Op != OpAMD64ORL { 34244 break 34245 } 34246 _ = or.Args[1] 34247 y := or.Args[0] 34248 s1 := or.Args[1] 34249 if s1.Op != OpAMD64SHLLconst { 34250 break 34251 } 34252 j1 := s1.AuxInt 34253 x1 := s1.Args[0] 34254 if x1.Op != OpAMD64MOVBloadidx1 { 34255 break 34256 } 34257 i1 := x1.AuxInt 34258 if x1.Aux != s { 34259 break 34260 } 34261 _ = x1.Args[2] 34262 if idx != x1.Args[0] { 34263 break 34264 } 34265 if p != x1.Args[1] { 34266 break 34267 } 34268 if mem != x1.Args[2] { 34269 break 34270 } 34271 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34272 break 34273 } 34274 b = mergePoint(b, x0, x1) 34275 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34276 v.reset(OpCopy) 34277 v.AddArg(v0) 34278 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34279 v1.AuxInt = j1 34280 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34281 v2.AuxInt = 8 34282 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34283 v3.AuxInt = i0 34284 v3.Aux = s 34285 v3.AddArg(p) 34286 v3.AddArg(idx) 34287 v3.AddArg(mem) 34288 v2.AddArg(v3) 34289 v1.AddArg(v2) 34290 v0.AddArg(v1) 34291 v0.AddArg(y) 34292 return true 34293 } 34294 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 34295 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34296 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34297 for { 34298 _ = v.Args[1] 34299 s0 := v.Args[0] 34300 if s0.Op != OpAMD64SHLLconst { 34301 break 34302 } 34303 j0 := s0.AuxInt 34304 x0 := s0.Args[0] 34305 if x0.Op != OpAMD64MOVBloadidx1 { 34306 break 34307 } 34308 i0 := x0.AuxInt 34309 s := x0.Aux 34310 _ = x0.Args[2] 34311 idx := x0.Args[0] 34312 p := x0.Args[1] 34313 mem := x0.Args[2] 34314 or := v.Args[1] 34315 if or.Op != OpAMD64ORL { 34316 break 34317 } 34318 _ = or.Args[1] 34319 y := or.Args[0] 34320 s1 := or.Args[1] 34321 if s1.Op != OpAMD64SHLLconst { 34322 break 34323 } 34324 j1 := s1.AuxInt 34325 x1 := s1.Args[0] 34326 if x1.Op != OpAMD64MOVBloadidx1 { 34327 break 34328 } 34329 i1 := x1.AuxInt 34330 if x1.Aux != s { 34331 break 34332 } 34333 _ = x1.Args[2] 34334 if idx != x1.Args[0] { 34335 break 34336 } 34337 if p != x1.Args[1] { 34338 break 34339 } 34340 if mem != x1.Args[2] { 34341 break 34342 } 34343 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, 
x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34344 break 34345 } 34346 b = mergePoint(b, x0, x1) 34347 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34348 v.reset(OpCopy) 34349 v.AddArg(v0) 34350 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34351 v1.AuxInt = j1 34352 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34353 v2.AuxInt = 8 34354 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34355 v3.AuxInt = i0 34356 v3.Aux = s 34357 v3.AddArg(p) 34358 v3.AddArg(idx) 34359 v3.AddArg(mem) 34360 v2.AddArg(v3) 34361 v1.AddArg(v2) 34362 v0.AddArg(v1) 34363 v0.AddArg(y) 34364 return true 34365 } 34366 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 34367 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34368 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34369 for { 34370 _ = v.Args[1] 34371 or := v.Args[0] 34372 if or.Op != OpAMD64ORL { 34373 break 34374 } 34375 _ = or.Args[1] 34376 s1 := or.Args[0] 34377 if s1.Op != OpAMD64SHLLconst { 34378 break 34379 } 34380 j1 := s1.AuxInt 34381 x1 := s1.Args[0] 34382 if x1.Op != OpAMD64MOVBloadidx1 { 34383 break 34384 } 34385 i1 := x1.AuxInt 34386 s := x1.Aux 34387 _ = x1.Args[2] 34388 p := x1.Args[0] 34389 idx := x1.Args[1] 34390 mem := x1.Args[2] 34391 y := or.Args[1] 34392 s0 := v.Args[1] 34393 if s0.Op != OpAMD64SHLLconst { 34394 break 34395 } 34396 j0 := s0.AuxInt 34397 x0 := s0.Args[0] 34398 if x0.Op != OpAMD64MOVBloadidx1 { 34399 break 34400 } 34401 i0 := x0.AuxInt 34402 if x0.Aux != s { 34403 break 34404 } 34405 _ = x0.Args[2] 34406 if p != x0.Args[0] { 34407 break 34408 } 34409 if idx != x0.Args[1] { 34410 break 34411 } 34412 if mem != x0.Args[2] { 34413 break 34414 } 34415 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34416 break 34417 } 34418 b = mergePoint(b, x0, x1) 34419 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 34420 v.reset(OpCopy) 34421 v.AddArg(v0) 34422 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 34423 v1.AuxInt = j1 34424 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 34425 v2.AuxInt = 8 34426 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34427 v3.AuxInt = i0 34428 v3.Aux = s 34429 v3.AddArg(p) 34430 v3.AddArg(idx) 34431 v3.AddArg(mem) 34432 v2.AddArg(v3) 34433 v1.AddArg(v2) 34434 v0.AddArg(v1) 34435 v0.AddArg(y) 34436 return true 34437 } 34438 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 34439 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34440 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 34441 for { 34442 _ = v.Args[1] 34443 or := v.Args[0] 34444 if or.Op != OpAMD64ORL { 34445 break 34446 } 34447 _ = or.Args[1] 34448 s1 := 
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
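	// These MOVBloadidx1 rules (in all their operand-order permutations)
	// recognize two adjacent byte loads that are shifted into big-endian
	// order and OR'd together, and replace the pair with one little-endian
	// 16-bit indexed load followed by a byte swap (ROLWconst [8]).
	// A hedged sketch of source that produces this shape (illustrative
	// only, not taken from the rules file):
	//
	//	x := uint16(b[i])<<8 | uint16(b[i+1]) // big-endian uint16 read
	//
	// which becomes a single MOVWloadidx1 plus a rotate by 8.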
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ORLconst [c] x)
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTSLconst [log2uint32(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] (ORLconst [d] x))
	// cond:
	// result: (ORLconst [c | d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c | d
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] (BTSLconst [d] x))
	// cond:
	// result: (ORLconst [c | 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c | 1<<uint32(d)
		v.AddArg(x)
		return true
	}
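	// The rules above fold an OR with a power-of-two constant into a single
	// bit-set instruction and collapse stacked constant ORs. For illustration
	// (hypothetical source, assuming a non-nacl target): x |= 0x1000 compiles
	// to BTSLconst [12] rather than ORLconst [4096]. BTS is only chosen for
	// constants >= 128, where the short sign-extended byte immediate form of
	// ORL no longer applies.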
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconstmodify_0(v *Value) bool {
	// match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
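// Like the constmodify rules above, the ORLload rules that follow fold
// ADDQconst and LEAQ address arithmetic into the instruction's offset and
// symbol. A hedged sketch of the effect, with made-up values:
//
//	(ORLload [8] {sym} val (ADDQconst [4] base) mem)
//	-> (ORLload [12] {sym} val base mem) // 8+4 still fits in 32 bits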
func rewriteValueAMD64_OpAMD64ORLload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ORLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: ( ORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLmodify_0(v *Value) bool {
	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
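// The first ORQ rules below turn an OR with a variable one-bit mask into a
// bit-test-and-set instruction. Illustrative source along these lines
// (hypothetical, assuming !config.nacl):
//
//	x |= 1 << (y & 63) // lowered via (ORQ (SHLQ (MOVQconst [1]) y) x) to BTSQ
//
// Constant power-of-two masks >= 128 take the BTSQconst path instead.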
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ORQ (SHLQ (MOVQconst [1]) y) x)
	// cond: !config.nacl
	// result: (BTSQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[1]
		x := v.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTSQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ x (SHLQ (MOVQconst [1]) y))
	// cond: !config.nacl
	// result: (BTSQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0.AuxInt != 1 {
			break
		}
		y := v_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTSQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTSQconst [log2(c)] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (ORQ x (MOVQconst [c]))
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTSQconst [log2(c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
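	// Constant rotates: the two rules above match x<<c | x>>(64-c) in either
	// operand order and select ROLQconst. For example (hypothetical source):
	//
	//	r := x<<13 | x>>51 // becomes ROLQconst [13]
	//
	// The much longer patterns that follow recognize the generic lowering of
	// a rotate by a variable amount and collapse it to a single ROLQ.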
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
	// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
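// rewriteValueAMD64_OpAMD64ORQ_20 continues the variable-rotate patterns,
// this time with a 32-bit shift amount (NEGL/ADDLconst/ANDLconst/CMPLconst
// in place of the 64-bit forms), followed by the RORQ variants. The
// underlying idiom is the same; roughly (hypothetical source):
//
//	r := x<<(y&63) | x>>((64-y)&63) // collapses to a single ROLQ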
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
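	// After the rotate patterns, (ORQ x x) reduces to x, and the remaining
	// ORQ rules merge adjacent little-endian loads into wider ones. A hedged
	// sketch of source that benefits (illustrative only):
	//
	//	u := uint16(p[0]) | uint16(p[1])<<8 // one MOVWload instead of two MOVBloads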
	// match: (ORQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
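// rewriteValueAMD64_OpAMD64ORQ_30 keeps merging loads: two adjacent 32-bit
// loads combine into one MOVQload, and shifted byte loads pair up into
// shifted word loads. A hedged, illustrative example (not from the rules
// file; binary.LittleEndian is assumed only for exposition):
//
//	u := uint64(binary.LittleEndian.Uint32(p)) |
//		uint64(binary.LittleEndian.Uint32(p[4:]))<<32 // one MOVQload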
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
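// The MOVBloadidx1 and MOVWloadidx1 rules that follow repeat the same
// merges for indexed addressing, where the address is p+idx instead of a
// constant offset from p. A hedged sketch of source that lowers to this
// shape (function and variable names are illustrative only):
//
//	func load16at(b []byte, i int) uint16 {
//		return uint16(b[i]) | uint16(b[i+1])<<8 // two MOVBloadidx1 fuse into one MOVWloadidx1
//	}
//
// Because ORQ and the p+idx address computation are both commutative, the
// generator emits one match block per operand ordering rather than
// canonicalizing first, which is why the functions below enumerate every
// p/idx and argument swap.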
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
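// Each merge is gated by use counts and a placement check: every matched
// intermediate (x0, x1, sh, s0, s1, or) must have exactly one use, so the
// narrow values become dead once rewritten, and mergePoint(b, x0, x1) must
// find a block where both loads are available for the wide load to live in.
// A sketch of code the guard deliberately rejects (hypothetical example):
//
//	func load16keep(b []byte) (uint16, byte) {
//		lo := b[0] // this byte load has a second use below,
//		return uint16(lo) | uint16(b[1])<<8, lo // so x0.Uses != 1 and no fusion fires
//	}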
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
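// The MOVLloadidx1 pairs above and below assemble a 64-bit value from two
// adjacent 32-bit indexed loads, the widest merge these rules perform. A
// hedged sketch of the corresponding source shape (names illustrative,
// assuming encoding/binary for the 32-bit halves):
//
//	func load64at(b []byte, i int) uint64 {
//		lo := uint64(binary.LittleEndian.Uint32(b[i:]))
//		hi := uint64(binary.LittleEndian.Uint32(b[i+4:]))
//		return lo | hi<<32 // two MOVLloadidx1 fuse into one MOVQloadidx1
//	}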
38645 } 38646 x1 := sh.Args[0] 38647 if x1.Op != OpAMD64MOVLloadidx1 { 38648 break 38649 } 38650 i1 := x1.AuxInt 38651 s := x1.Aux 38652 _ = x1.Args[2] 38653 p := x1.Args[0] 38654 idx := x1.Args[1] 38655 mem := x1.Args[2] 38656 x0 := v.Args[1] 38657 if x0.Op != OpAMD64MOVLloadidx1 { 38658 break 38659 } 38660 i0 := x0.AuxInt 38661 if x0.Aux != s { 38662 break 38663 } 38664 _ = x0.Args[2] 38665 if idx != x0.Args[0] { 38666 break 38667 } 38668 if p != x0.Args[1] { 38669 break 38670 } 38671 if mem != x0.Args[2] { 38672 break 38673 } 38674 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38675 break 38676 } 38677 b = mergePoint(b, x0, x1) 38678 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38679 v.reset(OpCopy) 38680 v.AddArg(v0) 38681 v0.AuxInt = i0 38682 v0.Aux = s 38683 v0.AddArg(p) 38684 v0.AddArg(idx) 38685 v0.AddArg(mem) 38686 return true 38687 } 38688 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 38689 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 38690 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 38691 for { 38692 _ = v.Args[1] 38693 sh := v.Args[0] 38694 if sh.Op != OpAMD64SHLQconst { 38695 break 38696 } 38697 if sh.AuxInt != 32 { 38698 break 38699 } 38700 x1 := sh.Args[0] 38701 if x1.Op != OpAMD64MOVLloadidx1 { 38702 break 38703 } 38704 i1 := x1.AuxInt 38705 s := x1.Aux 38706 _ = x1.Args[2] 38707 idx := x1.Args[0] 38708 p := x1.Args[1] 38709 mem := x1.Args[2] 38710 x0 := v.Args[1] 38711 if x0.Op != OpAMD64MOVLloadidx1 { 38712 break 38713 } 38714 i0 := x0.AuxInt 38715 if x0.Aux != s { 38716 break 38717 } 38718 _ = x0.Args[2] 38719 if idx != x0.Args[0] { 38720 break 38721 } 38722 if p != x0.Args[1] { 38723 break 38724 } 38725 if mem != x0.Args[2] { 38726 break 38727 } 38728 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 38729 break 38730 } 38731 b = mergePoint(b, x0, x1) 38732 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 38733 v.reset(OpCopy) 38734 v.AddArg(v0) 38735 v0.AuxInt = i0 38736 v0.Aux = s 38737 v0.AddArg(p) 38738 v0.AddArg(idx) 38739 v0.AddArg(mem) 38740 return true 38741 } 38742 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 38743 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 38744 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 38745 for { 38746 _ = v.Args[1] 38747 s1 := v.Args[0] 38748 if s1.Op != OpAMD64SHLQconst { 38749 break 38750 } 38751 j1 := s1.AuxInt 38752 x1 := s1.Args[0] 38753 if x1.Op != OpAMD64MOVBloadidx1 { 38754 break 38755 } 38756 i1 := x1.AuxInt 38757 s := x1.Aux 38758 _ = x1.Args[2] 38759 p := x1.Args[0] 38760 idx := x1.Args[1] 38761 mem := x1.Args[2] 38762 or := v.Args[1] 38763 if or.Op != OpAMD64ORQ { 38764 break 38765 } 38766 _ = or.Args[1] 38767 s0 := or.Args[0] 38768 if s0.Op != OpAMD64SHLQconst { 38769 break 38770 } 38771 j0 := s0.AuxInt 38772 x0 := s0.Args[0] 38773 if x0.Op != OpAMD64MOVBloadidx1 { 38774 break 38775 } 
38776 i0 := x0.AuxInt 38777 if x0.Aux != s { 38778 break 38779 } 38780 _ = x0.Args[2] 38781 if p != x0.Args[0] { 38782 break 38783 } 38784 if idx != x0.Args[1] { 38785 break 38786 } 38787 if mem != x0.Args[2] { 38788 break 38789 } 38790 y := or.Args[1] 38791 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 38792 break 38793 } 38794 b = mergePoint(b, x0, x1) 38795 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38796 v.reset(OpCopy) 38797 v.AddArg(v0) 38798 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38799 v1.AuxInt = j0 38800 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 38801 v2.AuxInt = i0 38802 v2.Aux = s 38803 v2.AddArg(p) 38804 v2.AddArg(idx) 38805 v2.AddArg(mem) 38806 v1.AddArg(v2) 38807 v0.AddArg(v1) 38808 v0.AddArg(y) 38809 return true 38810 } 38811 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 38812 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 38813 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 38814 for { 38815 _ = v.Args[1] 38816 s1 := v.Args[0] 38817 if s1.Op != OpAMD64SHLQconst { 38818 break 38819 } 38820 j1 := s1.AuxInt 38821 x1 := s1.Args[0] 38822 if x1.Op != OpAMD64MOVBloadidx1 { 38823 break 38824 } 38825 i1 := x1.AuxInt 38826 s := x1.Aux 38827 _ = x1.Args[2] 38828 idx := x1.Args[0] 38829 p := x1.Args[1] 38830 mem := x1.Args[2] 38831 or := v.Args[1] 38832 if or.Op != OpAMD64ORQ { 38833 break 38834 } 38835 _ = or.Args[1] 38836 s0 := or.Args[0] 38837 if s0.Op != OpAMD64SHLQconst { 38838 break 38839 } 38840 j0 := s0.AuxInt 38841 x0 := s0.Args[0] 38842 if x0.Op != OpAMD64MOVBloadidx1 { 38843 break 38844 } 38845 i0 := x0.AuxInt 38846 if x0.Aux != s { 38847 break 38848 } 38849 _ = x0.Args[2] 38850 if p != x0.Args[0] { 38851 break 38852 } 38853 if idx != x0.Args[1] { 38854 break 38855 } 38856 if mem != x0.Args[2] { 38857 break 38858 } 38859 y := or.Args[1] 38860 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 38861 break 38862 } 38863 b = mergePoint(b, x0, x1) 38864 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38865 v.reset(OpCopy) 38866 v.AddArg(v0) 38867 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38868 v1.AuxInt = j0 38869 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 38870 v2.AuxInt = i0 38871 v2.Aux = s 38872 v2.AddArg(p) 38873 v2.AddArg(idx) 38874 v2.AddArg(mem) 38875 v1.AddArg(v2) 38876 v0.AddArg(v1) 38877 v0.AddArg(y) 38878 return true 38879 } 38880 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 38881 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 38882 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) 
y) 38883 for { 38884 _ = v.Args[1] 38885 s1 := v.Args[0] 38886 if s1.Op != OpAMD64SHLQconst { 38887 break 38888 } 38889 j1 := s1.AuxInt 38890 x1 := s1.Args[0] 38891 if x1.Op != OpAMD64MOVBloadidx1 { 38892 break 38893 } 38894 i1 := x1.AuxInt 38895 s := x1.Aux 38896 _ = x1.Args[2] 38897 p := x1.Args[0] 38898 idx := x1.Args[1] 38899 mem := x1.Args[2] 38900 or := v.Args[1] 38901 if or.Op != OpAMD64ORQ { 38902 break 38903 } 38904 _ = or.Args[1] 38905 s0 := or.Args[0] 38906 if s0.Op != OpAMD64SHLQconst { 38907 break 38908 } 38909 j0 := s0.AuxInt 38910 x0 := s0.Args[0] 38911 if x0.Op != OpAMD64MOVBloadidx1 { 38912 break 38913 } 38914 i0 := x0.AuxInt 38915 if x0.Aux != s { 38916 break 38917 } 38918 _ = x0.Args[2] 38919 if idx != x0.Args[0] { 38920 break 38921 } 38922 if p != x0.Args[1] { 38923 break 38924 } 38925 if mem != x0.Args[2] { 38926 break 38927 } 38928 y := or.Args[1] 38929 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 38930 break 38931 } 38932 b = mergePoint(b, x0, x1) 38933 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38934 v.reset(OpCopy) 38935 v.AddArg(v0) 38936 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38937 v1.AuxInt = j0 38938 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 38939 v2.AuxInt = i0 38940 v2.Aux = s 38941 v2.AddArg(p) 38942 v2.AddArg(idx) 38943 v2.AddArg(mem) 38944 v1.AddArg(v2) 38945 v0.AddArg(v1) 38946 v0.AddArg(y) 38947 return true 38948 } 38949 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 38950 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 38951 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 38952 for { 38953 _ = v.Args[1] 38954 s1 := v.Args[0] 38955 if s1.Op != OpAMD64SHLQconst { 38956 break 38957 } 38958 j1 := s1.AuxInt 38959 x1 := s1.Args[0] 38960 if x1.Op != OpAMD64MOVBloadidx1 { 38961 break 38962 } 38963 i1 := x1.AuxInt 38964 s := x1.Aux 38965 _ = x1.Args[2] 38966 idx := x1.Args[0] 38967 p := x1.Args[1] 38968 mem := x1.Args[2] 38969 or := v.Args[1] 38970 if or.Op != OpAMD64ORQ { 38971 break 38972 } 38973 _ = or.Args[1] 38974 s0 := or.Args[0] 38975 if s0.Op != OpAMD64SHLQconst { 38976 break 38977 } 38978 j0 := s0.AuxInt 38979 x0 := s0.Args[0] 38980 if x0.Op != OpAMD64MOVBloadidx1 { 38981 break 38982 } 38983 i0 := x0.AuxInt 38984 if x0.Aux != s { 38985 break 38986 } 38987 _ = x0.Args[2] 38988 if idx != x0.Args[0] { 38989 break 38990 } 38991 if p != x0.Args[1] { 38992 break 38993 } 38994 if mem != x0.Args[2] { 38995 break 38996 } 38997 y := or.Args[1] 38998 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 38999 break 39000 } 39001 b = mergePoint(b, x0, x1) 39002 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39003 v.reset(OpCopy) 39004 v.AddArg(v0) 39005 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39006 v1.AuxInt = j0 39007 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39008 v2.AuxInt = i0 39009 v2.Aux = s 39010 
v2.AddArg(p) 39011 v2.AddArg(idx) 39012 v2.AddArg(mem) 39013 v1.AddArg(v2) 39014 v0.AddArg(v1) 39015 v0.AddArg(y) 39016 return true 39017 } 39018 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 39019 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39020 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39021 for { 39022 _ = v.Args[1] 39023 s1 := v.Args[0] 39024 if s1.Op != OpAMD64SHLQconst { 39025 break 39026 } 39027 j1 := s1.AuxInt 39028 x1 := s1.Args[0] 39029 if x1.Op != OpAMD64MOVBloadidx1 { 39030 break 39031 } 39032 i1 := x1.AuxInt 39033 s := x1.Aux 39034 _ = x1.Args[2] 39035 p := x1.Args[0] 39036 idx := x1.Args[1] 39037 mem := x1.Args[2] 39038 or := v.Args[1] 39039 if or.Op != OpAMD64ORQ { 39040 break 39041 } 39042 _ = or.Args[1] 39043 y := or.Args[0] 39044 s0 := or.Args[1] 39045 if s0.Op != OpAMD64SHLQconst { 39046 break 39047 } 39048 j0 := s0.AuxInt 39049 x0 := s0.Args[0] 39050 if x0.Op != OpAMD64MOVBloadidx1 { 39051 break 39052 } 39053 i0 := x0.AuxInt 39054 if x0.Aux != s { 39055 break 39056 } 39057 _ = x0.Args[2] 39058 if p != x0.Args[0] { 39059 break 39060 } 39061 if idx != x0.Args[1] { 39062 break 39063 } 39064 if mem != x0.Args[2] { 39065 break 39066 } 39067 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39068 break 39069 } 39070 b = mergePoint(b, x0, x1) 39071 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39072 v.reset(OpCopy) 39073 v.AddArg(v0) 39074 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39075 v1.AuxInt = j0 39076 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39077 v2.AuxInt = i0 39078 v2.Aux = s 39079 v2.AddArg(p) 39080 v2.AddArg(idx) 39081 v2.AddArg(mem) 39082 v1.AddArg(v2) 39083 v0.AddArg(v1) 39084 v0.AddArg(y) 39085 return true 39086 } 39087 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 39088 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39089 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39090 for { 39091 _ = v.Args[1] 39092 s1 := v.Args[0] 39093 if s1.Op != OpAMD64SHLQconst { 39094 break 39095 } 39096 j1 := s1.AuxInt 39097 x1 := s1.Args[0] 39098 if x1.Op != OpAMD64MOVBloadidx1 { 39099 break 39100 } 39101 i1 := x1.AuxInt 39102 s := x1.Aux 39103 _ = x1.Args[2] 39104 idx := x1.Args[0] 39105 p := x1.Args[1] 39106 mem := x1.Args[2] 39107 or := v.Args[1] 39108 if or.Op != OpAMD64ORQ { 39109 break 39110 } 39111 _ = or.Args[1] 39112 y := or.Args[0] 39113 s0 := or.Args[1] 39114 if s0.Op != OpAMD64SHLQconst { 39115 break 39116 } 39117 j0 := s0.AuxInt 39118 x0 := s0.Args[0] 39119 if x0.Op != OpAMD64MOVBloadidx1 { 39120 break 39121 } 39122 i0 := x0.AuxInt 39123 if x0.Aux != s { 39124 break 39125 } 39126 _ = x0.Args[2] 39127 if p != x0.Args[0] { 39128 break 39129 } 39130 if idx != x0.Args[1] { 39131 
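// The second load's operands are not in the p, idx order this
// permutation of the pattern expects; a separately generated case
// covers the swapped idx/p ordering, so this match simply fails here.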
break 39132 } 39133 if mem != x0.Args[2] { 39134 break 39135 } 39136 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39137 break 39138 } 39139 b = mergePoint(b, x0, x1) 39140 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39141 v.reset(OpCopy) 39142 v.AddArg(v0) 39143 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39144 v1.AuxInt = j0 39145 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39146 v2.AuxInt = i0 39147 v2.Aux = s 39148 v2.AddArg(p) 39149 v2.AddArg(idx) 39150 v2.AddArg(mem) 39151 v1.AddArg(v2) 39152 v0.AddArg(v1) 39153 v0.AddArg(y) 39154 return true 39155 } 39156 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 39157 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39158 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39159 for { 39160 _ = v.Args[1] 39161 s1 := v.Args[0] 39162 if s1.Op != OpAMD64SHLQconst { 39163 break 39164 } 39165 j1 := s1.AuxInt 39166 x1 := s1.Args[0] 39167 if x1.Op != OpAMD64MOVBloadidx1 { 39168 break 39169 } 39170 i1 := x1.AuxInt 39171 s := x1.Aux 39172 _ = x1.Args[2] 39173 p := x1.Args[0] 39174 idx := x1.Args[1] 39175 mem := x1.Args[2] 39176 or := v.Args[1] 39177 if or.Op != OpAMD64ORQ { 39178 break 39179 } 39180 _ = or.Args[1] 39181 y := or.Args[0] 39182 s0 := or.Args[1] 39183 if s0.Op != OpAMD64SHLQconst { 39184 break 39185 } 39186 j0 := s0.AuxInt 39187 x0 := s0.Args[0] 39188 if x0.Op != OpAMD64MOVBloadidx1 { 39189 break 39190 } 39191 i0 := x0.AuxInt 39192 if x0.Aux != s { 39193 break 39194 } 39195 _ = x0.Args[2] 39196 if idx != x0.Args[0] { 39197 break 39198 } 39199 if p != x0.Args[1] { 39200 break 39201 } 39202 if mem != x0.Args[2] { 39203 break 39204 } 39205 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39206 break 39207 } 39208 b = mergePoint(b, x0, x1) 39209 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39210 v.reset(OpCopy) 39211 v.AddArg(v0) 39212 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39213 v1.AuxInt = j0 39214 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39215 v2.AuxInt = i0 39216 v2.Aux = s 39217 v2.AddArg(p) 39218 v2.AddArg(idx) 39219 v2.AddArg(mem) 39220 v1.AddArg(v2) 39221 v0.AddArg(v1) 39222 v0.AddArg(y) 39223 return true 39224 } 39225 return false 39226 } 39227 func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool { 39228 b := v.Block 39229 _ = b 39230 typ := &b.Func.Config.Types 39231 _ = typ 39232 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 39233 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39234 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39235 for { 39236 _ 
= v.Args[1] 39237 s1 := v.Args[0] 39238 if s1.Op != OpAMD64SHLQconst { 39239 break 39240 } 39241 j1 := s1.AuxInt 39242 x1 := s1.Args[0] 39243 if x1.Op != OpAMD64MOVBloadidx1 { 39244 break 39245 } 39246 i1 := x1.AuxInt 39247 s := x1.Aux 39248 _ = x1.Args[2] 39249 idx := x1.Args[0] 39250 p := x1.Args[1] 39251 mem := x1.Args[2] 39252 or := v.Args[1] 39253 if or.Op != OpAMD64ORQ { 39254 break 39255 } 39256 _ = or.Args[1] 39257 y := or.Args[0] 39258 s0 := or.Args[1] 39259 if s0.Op != OpAMD64SHLQconst { 39260 break 39261 } 39262 j0 := s0.AuxInt 39263 x0 := s0.Args[0] 39264 if x0.Op != OpAMD64MOVBloadidx1 { 39265 break 39266 } 39267 i0 := x0.AuxInt 39268 if x0.Aux != s { 39269 break 39270 } 39271 _ = x0.Args[2] 39272 if idx != x0.Args[0] { 39273 break 39274 } 39275 if p != x0.Args[1] { 39276 break 39277 } 39278 if mem != x0.Args[2] { 39279 break 39280 } 39281 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39282 break 39283 } 39284 b = mergePoint(b, x0, x1) 39285 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39286 v.reset(OpCopy) 39287 v.AddArg(v0) 39288 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39289 v1.AuxInt = j0 39290 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39291 v2.AuxInt = i0 39292 v2.Aux = s 39293 v2.AddArg(p) 39294 v2.AddArg(idx) 39295 v2.AddArg(mem) 39296 v1.AddArg(v2) 39297 v0.AddArg(v1) 39298 v0.AddArg(y) 39299 return true 39300 } 39301 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 39302 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39303 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39304 for { 39305 _ = v.Args[1] 39306 or := v.Args[0] 39307 if or.Op != OpAMD64ORQ { 39308 break 39309 } 39310 _ = or.Args[1] 39311 s0 := or.Args[0] 39312 if s0.Op != OpAMD64SHLQconst { 39313 break 39314 } 39315 j0 := s0.AuxInt 39316 x0 := s0.Args[0] 39317 if x0.Op != OpAMD64MOVBloadidx1 { 39318 break 39319 } 39320 i0 := x0.AuxInt 39321 s := x0.Aux 39322 _ = x0.Args[2] 39323 p := x0.Args[0] 39324 idx := x0.Args[1] 39325 mem := x0.Args[2] 39326 y := or.Args[1] 39327 s1 := v.Args[1] 39328 if s1.Op != OpAMD64SHLQconst { 39329 break 39330 } 39331 j1 := s1.AuxInt 39332 x1 := s1.Args[0] 39333 if x1.Op != OpAMD64MOVBloadidx1 { 39334 break 39335 } 39336 i1 := x1.AuxInt 39337 if x1.Aux != s { 39338 break 39339 } 39340 _ = x1.Args[2] 39341 if p != x1.Args[0] { 39342 break 39343 } 39344 if idx != x1.Args[1] { 39345 break 39346 } 39347 if mem != x1.Args[2] { 39348 break 39349 } 39350 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39351 break 39352 } 39353 b = mergePoint(b, x0, x1) 39354 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39355 v.reset(OpCopy) 39356 v.AddArg(v0) 39357 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39358 v1.AuxInt = j0 39359 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39360 v2.AuxInt = i0 39361 v2.Aux = s 39362 v2.AddArg(p) 39363 v2.AddArg(idx) 
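// Whichever p/idx operand order the matched loads used, the
// replacement load is always emitted with its arguments in the
// canonical p, idx order.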
39364 v2.AddArg(mem) 39365 v1.AddArg(v2) 39366 v0.AddArg(v1) 39367 v0.AddArg(y) 39368 return true 39369 } 39370 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 39371 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39372 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39373 for { 39374 _ = v.Args[1] 39375 or := v.Args[0] 39376 if or.Op != OpAMD64ORQ { 39377 break 39378 } 39379 _ = or.Args[1] 39380 s0 := or.Args[0] 39381 if s0.Op != OpAMD64SHLQconst { 39382 break 39383 } 39384 j0 := s0.AuxInt 39385 x0 := s0.Args[0] 39386 if x0.Op != OpAMD64MOVBloadidx1 { 39387 break 39388 } 39389 i0 := x0.AuxInt 39390 s := x0.Aux 39391 _ = x0.Args[2] 39392 idx := x0.Args[0] 39393 p := x0.Args[1] 39394 mem := x0.Args[2] 39395 y := or.Args[1] 39396 s1 := v.Args[1] 39397 if s1.Op != OpAMD64SHLQconst { 39398 break 39399 } 39400 j1 := s1.AuxInt 39401 x1 := s1.Args[0] 39402 if x1.Op != OpAMD64MOVBloadidx1 { 39403 break 39404 } 39405 i1 := x1.AuxInt 39406 if x1.Aux != s { 39407 break 39408 } 39409 _ = x1.Args[2] 39410 if p != x1.Args[0] { 39411 break 39412 } 39413 if idx != x1.Args[1] { 39414 break 39415 } 39416 if mem != x1.Args[2] { 39417 break 39418 } 39419 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39420 break 39421 } 39422 b = mergePoint(b, x0, x1) 39423 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39424 v.reset(OpCopy) 39425 v.AddArg(v0) 39426 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39427 v1.AuxInt = j0 39428 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39429 v2.AuxInt = i0 39430 v2.Aux = s 39431 v2.AddArg(p) 39432 v2.AddArg(idx) 39433 v2.AddArg(mem) 39434 v1.AddArg(v2) 39435 v0.AddArg(v1) 39436 v0.AddArg(y) 39437 return true 39438 } 39439 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 39440 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39441 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39442 for { 39443 _ = v.Args[1] 39444 or := v.Args[0] 39445 if or.Op != OpAMD64ORQ { 39446 break 39447 } 39448 _ = or.Args[1] 39449 y := or.Args[0] 39450 s0 := or.Args[1] 39451 if s0.Op != OpAMD64SHLQconst { 39452 break 39453 } 39454 j0 := s0.AuxInt 39455 x0 := s0.Args[0] 39456 if x0.Op != OpAMD64MOVBloadidx1 { 39457 break 39458 } 39459 i0 := x0.AuxInt 39460 s := x0.Aux 39461 _ = x0.Args[2] 39462 p := x0.Args[0] 39463 idx := x0.Args[1] 39464 mem := x0.Args[2] 39465 s1 := v.Args[1] 39466 if s1.Op != OpAMD64SHLQconst { 39467 break 39468 } 39469 j1 := s1.AuxInt 39470 x1 := s1.Args[0] 39471 if x1.Op != OpAMD64MOVBloadidx1 { 39472 break 39473 } 39474 i1 := x1.AuxInt 39475 if x1.Aux != s { 39476 break 39477 } 39478 _ = x1.Args[2] 39479 if p != x1.Args[0] { 39480 break 39481 } 39482 if idx != x1.Args[1] { 39483 break 39484 } 39485 if mem != 
x1.Args[2] { 39486 break 39487 } 39488 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39489 break 39490 } 39491 b = mergePoint(b, x0, x1) 39492 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39493 v.reset(OpCopy) 39494 v.AddArg(v0) 39495 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39496 v1.AuxInt = j0 39497 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39498 v2.AuxInt = i0 39499 v2.Aux = s 39500 v2.AddArg(p) 39501 v2.AddArg(idx) 39502 v2.AddArg(mem) 39503 v1.AddArg(v2) 39504 v0.AddArg(v1) 39505 v0.AddArg(y) 39506 return true 39507 } 39508 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 39509 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39510 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39511 for { 39512 _ = v.Args[1] 39513 or := v.Args[0] 39514 if or.Op != OpAMD64ORQ { 39515 break 39516 } 39517 _ = or.Args[1] 39518 y := or.Args[0] 39519 s0 := or.Args[1] 39520 if s0.Op != OpAMD64SHLQconst { 39521 break 39522 } 39523 j0 := s0.AuxInt 39524 x0 := s0.Args[0] 39525 if x0.Op != OpAMD64MOVBloadidx1 { 39526 break 39527 } 39528 i0 := x0.AuxInt 39529 s := x0.Aux 39530 _ = x0.Args[2] 39531 idx := x0.Args[0] 39532 p := x0.Args[1] 39533 mem := x0.Args[2] 39534 s1 := v.Args[1] 39535 if s1.Op != OpAMD64SHLQconst { 39536 break 39537 } 39538 j1 := s1.AuxInt 39539 x1 := s1.Args[0] 39540 if x1.Op != OpAMD64MOVBloadidx1 { 39541 break 39542 } 39543 i1 := x1.AuxInt 39544 if x1.Aux != s { 39545 break 39546 } 39547 _ = x1.Args[2] 39548 if p != x1.Args[0] { 39549 break 39550 } 39551 if idx != x1.Args[1] { 39552 break 39553 } 39554 if mem != x1.Args[2] { 39555 break 39556 } 39557 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39558 break 39559 } 39560 b = mergePoint(b, x0, x1) 39561 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39562 v.reset(OpCopy) 39563 v.AddArg(v0) 39564 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39565 v1.AuxInt = j0 39566 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39567 v2.AuxInt = i0 39568 v2.Aux = s 39569 v2.AddArg(p) 39570 v2.AddArg(idx) 39571 v2.AddArg(mem) 39572 v1.AddArg(v2) 39573 v0.AddArg(v1) 39574 v0.AddArg(y) 39575 return true 39576 } 39577 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 39578 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39579 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39580 for { 39581 _ = v.Args[1] 39582 or := v.Args[0] 39583 if or.Op != OpAMD64ORQ { 39584 break 39585 } 39586 _ = or.Args[1] 39587 s0 := or.Args[0] 39588 if s0.Op != OpAMD64SHLQconst { 39589 break 39590 } 39591 j0 
:= s0.AuxInt 39592 x0 := s0.Args[0] 39593 if x0.Op != OpAMD64MOVBloadidx1 { 39594 break 39595 } 39596 i0 := x0.AuxInt 39597 s := x0.Aux 39598 _ = x0.Args[2] 39599 p := x0.Args[0] 39600 idx := x0.Args[1] 39601 mem := x0.Args[2] 39602 y := or.Args[1] 39603 s1 := v.Args[1] 39604 if s1.Op != OpAMD64SHLQconst { 39605 break 39606 } 39607 j1 := s1.AuxInt 39608 x1 := s1.Args[0] 39609 if x1.Op != OpAMD64MOVBloadidx1 { 39610 break 39611 } 39612 i1 := x1.AuxInt 39613 if x1.Aux != s { 39614 break 39615 } 39616 _ = x1.Args[2] 39617 if idx != x1.Args[0] { 39618 break 39619 } 39620 if p != x1.Args[1] { 39621 break 39622 } 39623 if mem != x1.Args[2] { 39624 break 39625 } 39626 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39627 break 39628 } 39629 b = mergePoint(b, x0, x1) 39630 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39631 v.reset(OpCopy) 39632 v.AddArg(v0) 39633 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39634 v1.AuxInt = j0 39635 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39636 v2.AuxInt = i0 39637 v2.Aux = s 39638 v2.AddArg(p) 39639 v2.AddArg(idx) 39640 v2.AddArg(mem) 39641 v1.AddArg(v2) 39642 v0.AddArg(v1) 39643 v0.AddArg(y) 39644 return true 39645 } 39646 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 39647 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39648 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39649 for { 39650 _ = v.Args[1] 39651 or := v.Args[0] 39652 if or.Op != OpAMD64ORQ { 39653 break 39654 } 39655 _ = or.Args[1] 39656 s0 := or.Args[0] 39657 if s0.Op != OpAMD64SHLQconst { 39658 break 39659 } 39660 j0 := s0.AuxInt 39661 x0 := s0.Args[0] 39662 if x0.Op != OpAMD64MOVBloadidx1 { 39663 break 39664 } 39665 i0 := x0.AuxInt 39666 s := x0.Aux 39667 _ = x0.Args[2] 39668 idx := x0.Args[0] 39669 p := x0.Args[1] 39670 mem := x0.Args[2] 39671 y := or.Args[1] 39672 s1 := v.Args[1] 39673 if s1.Op != OpAMD64SHLQconst { 39674 break 39675 } 39676 j1 := s1.AuxInt 39677 x1 := s1.Args[0] 39678 if x1.Op != OpAMD64MOVBloadidx1 { 39679 break 39680 } 39681 i1 := x1.AuxInt 39682 if x1.Aux != s { 39683 break 39684 } 39685 _ = x1.Args[2] 39686 if idx != x1.Args[0] { 39687 break 39688 } 39689 if p != x1.Args[1] { 39690 break 39691 } 39692 if mem != x1.Args[2] { 39693 break 39694 } 39695 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39696 break 39697 } 39698 b = mergePoint(b, x0, x1) 39699 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39700 v.reset(OpCopy) 39701 v.AddArg(v0) 39702 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39703 v1.AuxInt = j0 39704 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39705 v2.AuxInt = i0 39706 v2.Aux = s 39707 v2.AddArg(p) 39708 v2.AddArg(idx) 39709 v2.AddArg(mem) 39710 v1.AddArg(v2) 39711 v0.AddArg(v1) 39712 v0.AddArg(y) 39713 return true 39714 } 39715 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 
s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 39716 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39717 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39718 for { 39719 _ = v.Args[1] 39720 or := v.Args[0] 39721 if or.Op != OpAMD64ORQ { 39722 break 39723 } 39724 _ = or.Args[1] 39725 y := or.Args[0] 39726 s0 := or.Args[1] 39727 if s0.Op != OpAMD64SHLQconst { 39728 break 39729 } 39730 j0 := s0.AuxInt 39731 x0 := s0.Args[0] 39732 if x0.Op != OpAMD64MOVBloadidx1 { 39733 break 39734 } 39735 i0 := x0.AuxInt 39736 s := x0.Aux 39737 _ = x0.Args[2] 39738 p := x0.Args[0] 39739 idx := x0.Args[1] 39740 mem := x0.Args[2] 39741 s1 := v.Args[1] 39742 if s1.Op != OpAMD64SHLQconst { 39743 break 39744 } 39745 j1 := s1.AuxInt 39746 x1 := s1.Args[0] 39747 if x1.Op != OpAMD64MOVBloadidx1 { 39748 break 39749 } 39750 i1 := x1.AuxInt 39751 if x1.Aux != s { 39752 break 39753 } 39754 _ = x1.Args[2] 39755 if idx != x1.Args[0] { 39756 break 39757 } 39758 if p != x1.Args[1] { 39759 break 39760 } 39761 if mem != x1.Args[2] { 39762 break 39763 } 39764 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39765 break 39766 } 39767 b = mergePoint(b, x0, x1) 39768 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39769 v.reset(OpCopy) 39770 v.AddArg(v0) 39771 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39772 v1.AuxInt = j0 39773 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39774 v2.AuxInt = i0 39775 v2.Aux = s 39776 v2.AddArg(p) 39777 v2.AddArg(idx) 39778 v2.AddArg(mem) 39779 v1.AddArg(v2) 39780 v0.AddArg(v1) 39781 v0.AddArg(y) 39782 return true 39783 } 39784 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 39785 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39786 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 39787 for { 39788 _ = v.Args[1] 39789 or := v.Args[0] 39790 if or.Op != OpAMD64ORQ { 39791 break 39792 } 39793 _ = or.Args[1] 39794 y := or.Args[0] 39795 s0 := or.Args[1] 39796 if s0.Op != OpAMD64SHLQconst { 39797 break 39798 } 39799 j0 := s0.AuxInt 39800 x0 := s0.Args[0] 39801 if x0.Op != OpAMD64MOVBloadidx1 { 39802 break 39803 } 39804 i0 := x0.AuxInt 39805 s := x0.Aux 39806 _ = x0.Args[2] 39807 idx := x0.Args[0] 39808 p := x0.Args[1] 39809 mem := x0.Args[2] 39810 s1 := v.Args[1] 39811 if s1.Op != OpAMD64SHLQconst { 39812 break 39813 } 39814 j1 := s1.AuxInt 39815 x1 := s1.Args[0] 39816 if x1.Op != OpAMD64MOVBloadidx1 { 39817 break 39818 } 39819 i1 := x1.AuxInt 39820 if x1.Aux != s { 39821 break 39822 } 39823 _ = x1.Args[2] 39824 if idx != x1.Args[0] { 39825 break 39826 } 39827 if p != x1.Args[1] { 39828 break 39829 } 39830 if mem != x1.Args[2] { 39831 break 39832 } 39833 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39834 break 39835 } 39836 b = mergePoint(b, x0, x1) 39837 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39838 v.reset(OpCopy) 39839 v.AddArg(v0) 39840 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39841 v1.AuxInt = j0 39842 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 39843 v2.AuxInt = i0 39844 v2.Aux = s 39845 v2.AddArg(p) 39846 v2.AddArg(idx) 39847 v2.AddArg(mem) 39848 v1.AddArg(v2) 39849 v0.AddArg(v1) 39850 v0.AddArg(y) 39851 return true 39852 } 39853 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 39854 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39855 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 39856 for { 39857 _ = v.Args[1] 39858 s1 := v.Args[0] 39859 if s1.Op != OpAMD64SHLQconst { 39860 break 39861 } 39862 j1 := s1.AuxInt 39863 x1 := s1.Args[0] 39864 if x1.Op != OpAMD64MOVWloadidx1 { 39865 break 39866 } 39867 i1 := x1.AuxInt 39868 s := x1.Aux 39869 _ = x1.Args[2] 39870 p := x1.Args[0] 39871 idx := x1.Args[1] 39872 mem := x1.Args[2] 39873 or := v.Args[1] 39874 if or.Op != OpAMD64ORQ { 39875 break 39876 } 39877 _ = or.Args[1] 39878 s0 := or.Args[0] 39879 if s0.Op != OpAMD64SHLQconst { 39880 break 39881 } 39882 j0 := s0.AuxInt 39883 x0 := s0.Args[0] 39884 if x0.Op != OpAMD64MOVWloadidx1 { 39885 break 39886 } 39887 i0 := x0.AuxInt 39888 if x0.Aux != s { 39889 break 39890 } 39891 _ = x0.Args[2] 39892 if p != x0.Args[0] { 39893 break 39894 } 39895 if idx != x0.Args[1] { 39896 break 39897 } 39898 if mem != x0.Args[2] { 39899 break 39900 } 39901 y := or.Args[1] 39902 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39903 break 39904 } 39905 b = mergePoint(b, x0, x1) 39906 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39907 v.reset(OpCopy) 39908 v.AddArg(v0) 39909 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39910 v1.AuxInt = j0 39911 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 39912 v2.AuxInt = i0 39913 v2.Aux = s 39914 v2.AddArg(p) 39915 v2.AddArg(idx) 39916 v2.AddArg(mem) 39917 v1.AddArg(v2) 39918 v0.AddArg(v1) 39919 v0.AddArg(y) 39920 return true 39921 } 39922 return false 39923 } 39924 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { 39925 b := v.Block 39926 _ = b 39927 typ := &b.Func.Config.Types 39928 _ = typ 39929 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 39930 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 39931 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 39932 for { 39933 _ = v.Args[1] 39934 s1 := v.Args[0] 39935 if s1.Op != OpAMD64SHLQconst { 39936 break 39937 } 39938 j1 := s1.AuxInt 39939 x1 := s1.Args[0] 39940 if x1.Op != OpAMD64MOVWloadidx1 { 39941 break 39942 } 39943 i1 := x1.AuxInt 39944 s 
:= x1.Aux 39945 _ = x1.Args[2] 39946 idx := x1.Args[0] 39947 p := x1.Args[1] 39948 mem := x1.Args[2] 39949 or := v.Args[1] 39950 if or.Op != OpAMD64ORQ { 39951 break 39952 } 39953 _ = or.Args[1] 39954 s0 := or.Args[0] 39955 if s0.Op != OpAMD64SHLQconst { 39956 break 39957 } 39958 j0 := s0.AuxInt 39959 x0 := s0.Args[0] 39960 if x0.Op != OpAMD64MOVWloadidx1 { 39961 break 39962 } 39963 i0 := x0.AuxInt 39964 if x0.Aux != s { 39965 break 39966 } 39967 _ = x0.Args[2] 39968 if p != x0.Args[0] { 39969 break 39970 } 39971 if idx != x0.Args[1] { 39972 break 39973 } 39974 if mem != x0.Args[2] { 39975 break 39976 } 39977 y := or.Args[1] 39978 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 39979 break 39980 } 39981 b = mergePoint(b, x0, x1) 39982 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 39983 v.reset(OpCopy) 39984 v.AddArg(v0) 39985 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 39986 v1.AuxInt = j0 39987 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 39988 v2.AuxInt = i0 39989 v2.Aux = s 39990 v2.AddArg(p) 39991 v2.AddArg(idx) 39992 v2.AddArg(mem) 39993 v1.AddArg(v2) 39994 v0.AddArg(v1) 39995 v0.AddArg(y) 39996 return true 39997 } 39998 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 39999 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40000 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40001 for { 40002 _ = v.Args[1] 40003 s1 := v.Args[0] 40004 if s1.Op != OpAMD64SHLQconst { 40005 break 40006 } 40007 j1 := s1.AuxInt 40008 x1 := s1.Args[0] 40009 if x1.Op != OpAMD64MOVWloadidx1 { 40010 break 40011 } 40012 i1 := x1.AuxInt 40013 s := x1.Aux 40014 _ = x1.Args[2] 40015 p := x1.Args[0] 40016 idx := x1.Args[1] 40017 mem := x1.Args[2] 40018 or := v.Args[1] 40019 if or.Op != OpAMD64ORQ { 40020 break 40021 } 40022 _ = or.Args[1] 40023 s0 := or.Args[0] 40024 if s0.Op != OpAMD64SHLQconst { 40025 break 40026 } 40027 j0 := s0.AuxInt 40028 x0 := s0.Args[0] 40029 if x0.Op != OpAMD64MOVWloadidx1 { 40030 break 40031 } 40032 i0 := x0.AuxInt 40033 if x0.Aux != s { 40034 break 40035 } 40036 _ = x0.Args[2] 40037 if idx != x0.Args[0] { 40038 break 40039 } 40040 if p != x0.Args[1] { 40041 break 40042 } 40043 if mem != x0.Args[2] { 40044 break 40045 } 40046 y := or.Args[1] 40047 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40048 break 40049 } 40050 b = mergePoint(b, x0, x1) 40051 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40052 v.reset(OpCopy) 40053 v.AddArg(v0) 40054 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40055 v1.AuxInt = j0 40056 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40057 v2.AuxInt = i0 40058 v2.Aux = s 40059 v2.AddArg(p) 40060 v2.AddArg(idx) 40061 v2.AddArg(mem) 40062 v1.AddArg(v2) 40063 v0.AddArg(v1) 40064 v0.AddArg(y) 40065 return true 40066 } 40067 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] 
x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 40068 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40069 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40070 for { 40071 _ = v.Args[1] 40072 s1 := v.Args[0] 40073 if s1.Op != OpAMD64SHLQconst { 40074 break 40075 } 40076 j1 := s1.AuxInt 40077 x1 := s1.Args[0] 40078 if x1.Op != OpAMD64MOVWloadidx1 { 40079 break 40080 } 40081 i1 := x1.AuxInt 40082 s := x1.Aux 40083 _ = x1.Args[2] 40084 idx := x1.Args[0] 40085 p := x1.Args[1] 40086 mem := x1.Args[2] 40087 or := v.Args[1] 40088 if or.Op != OpAMD64ORQ { 40089 break 40090 } 40091 _ = or.Args[1] 40092 s0 := or.Args[0] 40093 if s0.Op != OpAMD64SHLQconst { 40094 break 40095 } 40096 j0 := s0.AuxInt 40097 x0 := s0.Args[0] 40098 if x0.Op != OpAMD64MOVWloadidx1 { 40099 break 40100 } 40101 i0 := x0.AuxInt 40102 if x0.Aux != s { 40103 break 40104 } 40105 _ = x0.Args[2] 40106 if idx != x0.Args[0] { 40107 break 40108 } 40109 if p != x0.Args[1] { 40110 break 40111 } 40112 if mem != x0.Args[2] { 40113 break 40114 } 40115 y := or.Args[1] 40116 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40117 break 40118 } 40119 b = mergePoint(b, x0, x1) 40120 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40121 v.reset(OpCopy) 40122 v.AddArg(v0) 40123 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40124 v1.AuxInt = j0 40125 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40126 v2.AuxInt = i0 40127 v2.Aux = s 40128 v2.AddArg(p) 40129 v2.AddArg(idx) 40130 v2.AddArg(mem) 40131 v1.AddArg(v2) 40132 v0.AddArg(v1) 40133 v0.AddArg(y) 40134 return true 40135 } 40136 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 40137 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40138 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40139 for { 40140 _ = v.Args[1] 40141 s1 := v.Args[0] 40142 if s1.Op != OpAMD64SHLQconst { 40143 break 40144 } 40145 j1 := s1.AuxInt 40146 x1 := s1.Args[0] 40147 if x1.Op != OpAMD64MOVWloadidx1 { 40148 break 40149 } 40150 i1 := x1.AuxInt 40151 s := x1.Aux 40152 _ = x1.Args[2] 40153 p := x1.Args[0] 40154 idx := x1.Args[1] 40155 mem := x1.Args[2] 40156 or := v.Args[1] 40157 if or.Op != OpAMD64ORQ { 40158 break 40159 } 40160 _ = or.Args[1] 40161 y := or.Args[0] 40162 s0 := or.Args[1] 40163 if s0.Op != OpAMD64SHLQconst { 40164 break 40165 } 40166 j0 := s0.AuxInt 40167 x0 := s0.Args[0] 40168 if x0.Op != OpAMD64MOVWloadidx1 { 40169 break 40170 } 40171 i0 := x0.AuxInt 40172 if x0.Aux != s { 40173 break 40174 } 40175 _ = x0.Args[2] 40176 if p != x0.Args[0] { 40177 break 40178 } 40179 if idx != x0.Args[1] { 40180 break 40181 } 40182 if mem != x0.Args[2] { 40183 break 40184 } 40185 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40186 break 40187 } 40188 b = mergePoint(b, x0, x1) 40189 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40190 v.reset(OpCopy) 40191 v.AddArg(v0) 40192 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40193 v1.AuxInt = j0 40194 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40195 v2.AuxInt = i0 40196 v2.Aux = s 40197 v2.AddArg(p) 40198 v2.AddArg(idx) 40199 v2.AddArg(mem) 40200 v1.AddArg(v2) 40201 v0.AddArg(v1) 40202 v0.AddArg(y) 40203 return true 40204 } 40205 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 40206 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40207 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40208 for { 40209 _ = v.Args[1] 40210 s1 := v.Args[0] 40211 if s1.Op != OpAMD64SHLQconst { 40212 break 40213 } 40214 j1 := s1.AuxInt 40215 x1 := s1.Args[0] 40216 if x1.Op != OpAMD64MOVWloadidx1 { 40217 break 40218 } 40219 i1 := x1.AuxInt 40220 s := x1.Aux 40221 _ = x1.Args[2] 40222 idx := x1.Args[0] 40223 p := x1.Args[1] 40224 mem := x1.Args[2] 40225 or := v.Args[1] 40226 if or.Op != OpAMD64ORQ { 40227 break 40228 } 40229 _ = or.Args[1] 40230 y := or.Args[0] 40231 s0 := or.Args[1] 40232 if s0.Op != OpAMD64SHLQconst { 40233 break 40234 } 40235 j0 := s0.AuxInt 40236 x0 := s0.Args[0] 40237 if x0.Op != OpAMD64MOVWloadidx1 { 40238 break 40239 } 40240 i0 := x0.AuxInt 40241 if x0.Aux != s { 40242 break 40243 } 40244 _ = x0.Args[2] 40245 if p != x0.Args[0] { 40246 break 40247 } 40248 if idx != x0.Args[1] { 40249 break 40250 } 40251 if mem != x0.Args[2] { 40252 break 40253 } 40254 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40255 break 40256 } 40257 b = mergePoint(b, x0, x1) 40258 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40259 v.reset(OpCopy) 40260 v.AddArg(v0) 40261 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40262 v1.AuxInt = j0 40263 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40264 v2.AuxInt = i0 40265 v2.Aux = s 40266 v2.AddArg(p) 40267 v2.AddArg(idx) 40268 v2.AddArg(mem) 40269 v1.AddArg(v2) 40270 v0.AddArg(v1) 40271 v0.AddArg(y) 40272 return true 40273 } 40274 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 40275 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40276 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40277 for { 40278 _ = v.Args[1] 40279 s1 := v.Args[0] 40280 if s1.Op != OpAMD64SHLQconst { 40281 break 40282 } 40283 j1 := s1.AuxInt 40284 x1 := s1.Args[0] 40285 if x1.Op != OpAMD64MOVWloadidx1 { 40286 break 40287 } 40288 i1 := x1.AuxInt 40289 s := x1.Aux 40290 _ = x1.Args[2] 40291 p := x1.Args[0] 40292 idx := x1.Args[1] 40293 mem := x1.Args[2] 40294 or := v.Args[1] 40295 if or.Op != OpAMD64ORQ { 40296 break 40297 } 40298 
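// The blank read of or.Args[1] below appears to be rulegen's way of
// touching the inner ORQ's last operand up front before the matcher
// pulls its arguments out individually.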
_ = or.Args[1] 40299 y := or.Args[0] 40300 s0 := or.Args[1] 40301 if s0.Op != OpAMD64SHLQconst { 40302 break 40303 } 40304 j0 := s0.AuxInt 40305 x0 := s0.Args[0] 40306 if x0.Op != OpAMD64MOVWloadidx1 { 40307 break 40308 } 40309 i0 := x0.AuxInt 40310 if x0.Aux != s { 40311 break 40312 } 40313 _ = x0.Args[2] 40314 if idx != x0.Args[0] { 40315 break 40316 } 40317 if p != x0.Args[1] { 40318 break 40319 } 40320 if mem != x0.Args[2] { 40321 break 40322 } 40323 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40324 break 40325 } 40326 b = mergePoint(b, x0, x1) 40327 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40328 v.reset(OpCopy) 40329 v.AddArg(v0) 40330 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40331 v1.AuxInt = j0 40332 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40333 v2.AuxInt = i0 40334 v2.Aux = s 40335 v2.AddArg(p) 40336 v2.AddArg(idx) 40337 v2.AddArg(mem) 40338 v1.AddArg(v2) 40339 v0.AddArg(v1) 40340 v0.AddArg(y) 40341 return true 40342 } 40343 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 40344 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40345 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40346 for { 40347 _ = v.Args[1] 40348 s1 := v.Args[0] 40349 if s1.Op != OpAMD64SHLQconst { 40350 break 40351 } 40352 j1 := s1.AuxInt 40353 x1 := s1.Args[0] 40354 if x1.Op != OpAMD64MOVWloadidx1 { 40355 break 40356 } 40357 i1 := x1.AuxInt 40358 s := x1.Aux 40359 _ = x1.Args[2] 40360 idx := x1.Args[0] 40361 p := x1.Args[1] 40362 mem := x1.Args[2] 40363 or := v.Args[1] 40364 if or.Op != OpAMD64ORQ { 40365 break 40366 } 40367 _ = or.Args[1] 40368 y := or.Args[0] 40369 s0 := or.Args[1] 40370 if s0.Op != OpAMD64SHLQconst { 40371 break 40372 } 40373 j0 := s0.AuxInt 40374 x0 := s0.Args[0] 40375 if x0.Op != OpAMD64MOVWloadidx1 { 40376 break 40377 } 40378 i0 := x0.AuxInt 40379 if x0.Aux != s { 40380 break 40381 } 40382 _ = x0.Args[2] 40383 if idx != x0.Args[0] { 40384 break 40385 } 40386 if p != x0.Args[1] { 40387 break 40388 } 40389 if mem != x0.Args[2] { 40390 break 40391 } 40392 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40393 break 40394 } 40395 b = mergePoint(b, x0, x1) 40396 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40397 v.reset(OpCopy) 40398 v.AddArg(v0) 40399 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40400 v1.AuxInt = j0 40401 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40402 v2.AuxInt = i0 40403 v2.Aux = s 40404 v2.AddArg(p) 40405 v2.AddArg(idx) 40406 v2.AddArg(mem) 40407 v1.AddArg(v2) 40408 v0.AddArg(v1) 40409 v0.AddArg(y) 40410 return true 40411 } 40412 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 40413 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40414 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40415 for { 40416 _ = v.Args[1] 40417 or := v.Args[0] 40418 if or.Op != OpAMD64ORQ { 40419 break 40420 } 40421 _ = or.Args[1] 40422 s0 := or.Args[0] 40423 if s0.Op != OpAMD64SHLQconst { 40424 break 40425 } 40426 j0 := s0.AuxInt 40427 x0 := s0.Args[0] 40428 if x0.Op != OpAMD64MOVWloadidx1 { 40429 break 40430 } 40431 i0 := x0.AuxInt 40432 s := x0.Aux 40433 _ = x0.Args[2] 40434 p := x0.Args[0] 40435 idx := x0.Args[1] 40436 mem := x0.Args[2] 40437 y := or.Args[1] 40438 s1 := v.Args[1] 40439 if s1.Op != OpAMD64SHLQconst { 40440 break 40441 } 40442 j1 := s1.AuxInt 40443 x1 := s1.Args[0] 40444 if x1.Op != OpAMD64MOVWloadidx1 { 40445 break 40446 } 40447 i1 := x1.AuxInt 40448 if x1.Aux != s { 40449 break 40450 } 40451 _ = x1.Args[2] 40452 if p != x1.Args[0] { 40453 break 40454 } 40455 if idx != x1.Args[1] { 40456 break 40457 } 40458 if mem != x1.Args[2] { 40459 break 40460 } 40461 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40462 break 40463 } 40464 b = mergePoint(b, x0, x1) 40465 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40466 v.reset(OpCopy) 40467 v.AddArg(v0) 40468 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40469 v1.AuxInt = j0 40470 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40471 v2.AuxInt = i0 40472 v2.Aux = s 40473 v2.AddArg(p) 40474 v2.AddArg(idx) 40475 v2.AddArg(mem) 40476 v1.AddArg(v2) 40477 v0.AddArg(v1) 40478 v0.AddArg(y) 40479 return true 40480 } 40481 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 40482 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40483 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40484 for { 40485 _ = v.Args[1] 40486 or := v.Args[0] 40487 if or.Op != OpAMD64ORQ { 40488 break 40489 } 40490 _ = or.Args[1] 40491 s0 := or.Args[0] 40492 if s0.Op != OpAMD64SHLQconst { 40493 break 40494 } 40495 j0 := s0.AuxInt 40496 x0 := s0.Args[0] 40497 if x0.Op != OpAMD64MOVWloadidx1 { 40498 break 40499 } 40500 i0 := x0.AuxInt 40501 s := x0.Aux 40502 _ = x0.Args[2] 40503 idx := x0.Args[0] 40504 p := x0.Args[1] 40505 mem := x0.Args[2] 40506 y := or.Args[1] 40507 s1 := v.Args[1] 40508 if s1.Op != OpAMD64SHLQconst { 40509 break 40510 } 40511 j1 := s1.AuxInt 40512 x1 := s1.Args[0] 40513 if x1.Op != OpAMD64MOVWloadidx1 { 40514 break 40515 } 40516 i1 := x1.AuxInt 40517 if x1.Aux != s { 40518 break 40519 } 40520 _ = x1.Args[2] 40521 if p != x1.Args[0] { 40522 break 40523 } 40524 if idx != x1.Args[1] { 40525 break 40526 } 40527 if mem != x1.Args[2] { 40528 break 40529 } 40530 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40531 break 40532 } 40533 b = mergePoint(b, x0, x1) 40534 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40535 
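// The replacement tree is materialized in the merge-point block chosen
// above (the @mergePoint(b,x0,x1) part of the result pattern); v itself
// is then reset to an OpCopy of that tree rather than rewritten in
// place.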
v.reset(OpCopy) 40536 v.AddArg(v0) 40537 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40538 v1.AuxInt = j0 40539 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40540 v2.AuxInt = i0 40541 v2.Aux = s 40542 v2.AddArg(p) 40543 v2.AddArg(idx) 40544 v2.AddArg(mem) 40545 v1.AddArg(v2) 40546 v0.AddArg(v1) 40547 v0.AddArg(y) 40548 return true 40549 } 40550 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 40551 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40552 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40553 for { 40554 _ = v.Args[1] 40555 or := v.Args[0] 40556 if or.Op != OpAMD64ORQ { 40557 break 40558 } 40559 _ = or.Args[1] 40560 y := or.Args[0] 40561 s0 := or.Args[1] 40562 if s0.Op != OpAMD64SHLQconst { 40563 break 40564 } 40565 j0 := s0.AuxInt 40566 x0 := s0.Args[0] 40567 if x0.Op != OpAMD64MOVWloadidx1 { 40568 break 40569 } 40570 i0 := x0.AuxInt 40571 s := x0.Aux 40572 _ = x0.Args[2] 40573 p := x0.Args[0] 40574 idx := x0.Args[1] 40575 mem := x0.Args[2] 40576 s1 := v.Args[1] 40577 if s1.Op != OpAMD64SHLQconst { 40578 break 40579 } 40580 j1 := s1.AuxInt 40581 x1 := s1.Args[0] 40582 if x1.Op != OpAMD64MOVWloadidx1 { 40583 break 40584 } 40585 i1 := x1.AuxInt 40586 if x1.Aux != s { 40587 break 40588 } 40589 _ = x1.Args[2] 40590 if p != x1.Args[0] { 40591 break 40592 } 40593 if idx != x1.Args[1] { 40594 break 40595 } 40596 if mem != x1.Args[2] { 40597 break 40598 } 40599 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40600 break 40601 } 40602 b = mergePoint(b, x0, x1) 40603 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40604 v.reset(OpCopy) 40605 v.AddArg(v0) 40606 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40607 v1.AuxInt = j0 40608 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40609 v2.AuxInt = i0 40610 v2.Aux = s 40611 v2.AddArg(p) 40612 v2.AddArg(idx) 40613 v2.AddArg(mem) 40614 v1.AddArg(v2) 40615 v0.AddArg(v1) 40616 v0.AddArg(y) 40617 return true 40618 } 40619 return false 40620 } 40621 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { 40622 b := v.Block 40623 _ = b 40624 typ := &b.Func.Config.Types 40625 _ = typ 40626 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 40627 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40628 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40629 for { 40630 _ = v.Args[1] 40631 or := v.Args[0] 40632 if or.Op != OpAMD64ORQ { 40633 break 40634 } 40635 _ = or.Args[1] 40636 y := or.Args[0] 40637 s0 := or.Args[1] 40638 if s0.Op != OpAMD64SHLQconst { 40639 break 40640 } 40641 j0 := s0.AuxInt 40642 x0 := s0.Args[0] 40643 if x0.Op != OpAMD64MOVWloadidx1 { 40644 break 40645 } 40646 i0 := x0.AuxInt 40647 s := x0.Aux 40648 _ = x0.Args[2] 40649 idx := x0.Args[0] 40650 p := 
x0.Args[1] 40651 mem := x0.Args[2] 40652 s1 := v.Args[1] 40653 if s1.Op != OpAMD64SHLQconst { 40654 break 40655 } 40656 j1 := s1.AuxInt 40657 x1 := s1.Args[0] 40658 if x1.Op != OpAMD64MOVWloadidx1 { 40659 break 40660 } 40661 i1 := x1.AuxInt 40662 if x1.Aux != s { 40663 break 40664 } 40665 _ = x1.Args[2] 40666 if p != x1.Args[0] { 40667 break 40668 } 40669 if idx != x1.Args[1] { 40670 break 40671 } 40672 if mem != x1.Args[2] { 40673 break 40674 } 40675 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40676 break 40677 } 40678 b = mergePoint(b, x0, x1) 40679 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40680 v.reset(OpCopy) 40681 v.AddArg(v0) 40682 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40683 v1.AuxInt = j0 40684 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40685 v2.AuxInt = i0 40686 v2.Aux = s 40687 v2.AddArg(p) 40688 v2.AddArg(idx) 40689 v2.AddArg(mem) 40690 v1.AddArg(v2) 40691 v0.AddArg(v1) 40692 v0.AddArg(y) 40693 return true 40694 } 40695 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 40696 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40697 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40698 for { 40699 _ = v.Args[1] 40700 or := v.Args[0] 40701 if or.Op != OpAMD64ORQ { 40702 break 40703 } 40704 _ = or.Args[1] 40705 s0 := or.Args[0] 40706 if s0.Op != OpAMD64SHLQconst { 40707 break 40708 } 40709 j0 := s0.AuxInt 40710 x0 := s0.Args[0] 40711 if x0.Op != OpAMD64MOVWloadidx1 { 40712 break 40713 } 40714 i0 := x0.AuxInt 40715 s := x0.Aux 40716 _ = x0.Args[2] 40717 p := x0.Args[0] 40718 idx := x0.Args[1] 40719 mem := x0.Args[2] 40720 y := or.Args[1] 40721 s1 := v.Args[1] 40722 if s1.Op != OpAMD64SHLQconst { 40723 break 40724 } 40725 j1 := s1.AuxInt 40726 x1 := s1.Args[0] 40727 if x1.Op != OpAMD64MOVWloadidx1 { 40728 break 40729 } 40730 i1 := x1.AuxInt 40731 if x1.Aux != s { 40732 break 40733 } 40734 _ = x1.Args[2] 40735 if idx != x1.Args[0] { 40736 break 40737 } 40738 if p != x1.Args[1] { 40739 break 40740 } 40741 if mem != x1.Args[2] { 40742 break 40743 } 40744 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40745 break 40746 } 40747 b = mergePoint(b, x0, x1) 40748 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40749 v.reset(OpCopy) 40750 v.AddArg(v0) 40751 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40752 v1.AuxInt = j0 40753 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40754 v2.AuxInt = i0 40755 v2.Aux = s 40756 v2.AddArg(p) 40757 v2.AddArg(idx) 40758 v2.AddArg(mem) 40759 v1.AddArg(v2) 40760 v0.AddArg(v1) 40761 v0.AddArg(y) 40762 return true 40763 } 40764 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 40765 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40766 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40767 for { 40768 _ = v.Args[1] 40769 or := v.Args[0] 40770 if or.Op != OpAMD64ORQ { 40771 break 40772 } 40773 _ = or.Args[1] 40774 s0 := or.Args[0] 40775 if s0.Op != OpAMD64SHLQconst { 40776 break 40777 } 40778 j0 := s0.AuxInt 40779 x0 := s0.Args[0] 40780 if x0.Op != OpAMD64MOVWloadidx1 { 40781 break 40782 } 40783 i0 := x0.AuxInt 40784 s := x0.Aux 40785 _ = x0.Args[2] 40786 idx := x0.Args[0] 40787 p := x0.Args[1] 40788 mem := x0.Args[2] 40789 y := or.Args[1] 40790 s1 := v.Args[1] 40791 if s1.Op != OpAMD64SHLQconst { 40792 break 40793 } 40794 j1 := s1.AuxInt 40795 x1 := s1.Args[0] 40796 if x1.Op != OpAMD64MOVWloadidx1 { 40797 break 40798 } 40799 i1 := x1.AuxInt 40800 if x1.Aux != s { 40801 break 40802 } 40803 _ = x1.Args[2] 40804 if idx != x1.Args[0] { 40805 break 40806 } 40807 if p != x1.Args[1] { 40808 break 40809 } 40810 if mem != x1.Args[2] { 40811 break 40812 } 40813 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40814 break 40815 } 40816 b = mergePoint(b, x0, x1) 40817 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40818 v.reset(OpCopy) 40819 v.AddArg(v0) 40820 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40821 v1.AuxInt = j0 40822 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40823 v2.AuxInt = i0 40824 v2.Aux = s 40825 v2.AddArg(p) 40826 v2.AddArg(idx) 40827 v2.AddArg(mem) 40828 v1.AddArg(v2) 40829 v0.AddArg(v1) 40830 v0.AddArg(y) 40831 return true 40832 } 40833 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 40834 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40835 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40836 for { 40837 _ = v.Args[1] 40838 or := v.Args[0] 40839 if or.Op != OpAMD64ORQ { 40840 break 40841 } 40842 _ = or.Args[1] 40843 y := or.Args[0] 40844 s0 := or.Args[1] 40845 if s0.Op != OpAMD64SHLQconst { 40846 break 40847 } 40848 j0 := s0.AuxInt 40849 x0 := s0.Args[0] 40850 if x0.Op != OpAMD64MOVWloadidx1 { 40851 break 40852 } 40853 i0 := x0.AuxInt 40854 s := x0.Aux 40855 _ = x0.Args[2] 40856 p := x0.Args[0] 40857 idx := x0.Args[1] 40858 mem := x0.Args[2] 40859 s1 := v.Args[1] 40860 if s1.Op != OpAMD64SHLQconst { 40861 break 40862 } 40863 j1 := s1.AuxInt 40864 x1 := s1.Args[0] 40865 if x1.Op != OpAMD64MOVWloadidx1 { 40866 break 40867 } 40868 i1 := x1.AuxInt 40869 if x1.Aux != s { 40870 break 40871 } 40872 _ = x1.Args[2] 40873 if idx != x1.Args[0] { 40874 break 40875 } 40876 if p != x1.Args[1] { 40877 break 40878 } 40879 if mem != x1.Args[2] { 40880 break 40881 } 40882 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40883 break 40884 } 40885 b = mergePoint(b, x0, x1) 40886 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40887 
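// Roughly, in source terms, the access pattern this family of
// MOVWloadidx1 rules folds looks like the sketch below, using
// encoding/binary (b, i, lo, hi, and x are illustrative names only,
// not part of this file, assuming a little-endian layout):
//
//	lo := uint64(binary.LittleEndian.Uint16(b[i:]))
//	hi := uint64(binary.LittleEndian.Uint16(b[i+2:]))
//	x := hi<<16 | lo // folded into: uint64(binary.LittleEndian.Uint32(b[i:]))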
v.reset(OpCopy) 40888 v.AddArg(v0) 40889 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40890 v1.AuxInt = j0 40891 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40892 v2.AuxInt = i0 40893 v2.Aux = s 40894 v2.AddArg(p) 40895 v2.AddArg(idx) 40896 v2.AddArg(mem) 40897 v1.AddArg(v2) 40898 v0.AddArg(v1) 40899 v0.AddArg(y) 40900 return true 40901 } 40902 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 40903 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 40904 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 40905 for { 40906 _ = v.Args[1] 40907 or := v.Args[0] 40908 if or.Op != OpAMD64ORQ { 40909 break 40910 } 40911 _ = or.Args[1] 40912 y := or.Args[0] 40913 s0 := or.Args[1] 40914 if s0.Op != OpAMD64SHLQconst { 40915 break 40916 } 40917 j0 := s0.AuxInt 40918 x0 := s0.Args[0] 40919 if x0.Op != OpAMD64MOVWloadidx1 { 40920 break 40921 } 40922 i0 := x0.AuxInt 40923 s := x0.Aux 40924 _ = x0.Args[2] 40925 idx := x0.Args[0] 40926 p := x0.Args[1] 40927 mem := x0.Args[2] 40928 s1 := v.Args[1] 40929 if s1.Op != OpAMD64SHLQconst { 40930 break 40931 } 40932 j1 := s1.AuxInt 40933 x1 := s1.Args[0] 40934 if x1.Op != OpAMD64MOVWloadidx1 { 40935 break 40936 } 40937 i1 := x1.AuxInt 40938 if x1.Aux != s { 40939 break 40940 } 40941 _ = x1.Args[2] 40942 if idx != x1.Args[0] { 40943 break 40944 } 40945 if p != x1.Args[1] { 40946 break 40947 } 40948 if mem != x1.Args[2] { 40949 break 40950 } 40951 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 40952 break 40953 } 40954 b = mergePoint(b, x0, x1) 40955 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 40956 v.reset(OpCopy) 40957 v.AddArg(v0) 40958 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 40959 v1.AuxInt = j0 40960 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 40961 v2.AuxInt = i0 40962 v2.Aux = s 40963 v2.AddArg(p) 40964 v2.AddArg(idx) 40965 v2.AddArg(mem) 40966 v1.AddArg(v2) 40967 v0.AddArg(v1) 40968 v0.AddArg(y) 40969 return true 40970 } 40971 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 40972 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 40973 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 40974 for { 40975 _ = v.Args[1] 40976 x1 := v.Args[0] 40977 if x1.Op != OpAMD64MOVBload { 40978 break 40979 } 40980 i1 := x1.AuxInt 40981 s := x1.Aux 40982 _ = x1.Args[1] 40983 p := x1.Args[0] 40984 mem := x1.Args[1] 40985 sh := v.Args[1] 40986 if sh.Op != OpAMD64SHLQconst { 40987 break 40988 } 40989 if sh.AuxInt != 8 { 40990 break 40991 } 40992 x0 := sh.Args[0] 40993 if x0.Op != OpAMD64MOVBload { 40994 break 40995 } 40996 i0 := x0.AuxInt 40997 if x0.Aux != s { 40998 break 40999 } 41000 _ = x0.Args[1] 41001 if p != x0.Args[0] { 41002 break 41003 } 41004 if mem != x0.Args[1] { 41005 break 41006 } 41007 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 41008 
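// Besides the adjacency check i1 == i0+1, every matched value must
// have exactly one use (so the clobbered originals become dead), and
// mergePoint must find a block in which both original loads are
// available, where the merged load can be placed. Note the byte order
// here: the shifted high byte comes from the lower address i0, i.e. a
// big-endian pair, which is why the result is a 16-bit load followed
// by a ROLW $8 byte swap.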
break 41009 } 41010 b = mergePoint(b, x0, x1) 41011 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 41012 v.reset(OpCopy) 41013 v.AddArg(v0) 41014 v0.AuxInt = 8 41015 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 41016 v1.AuxInt = i0 41017 v1.Aux = s 41018 v1.AddArg(p) 41019 v1.AddArg(mem) 41020 v0.AddArg(v1) 41021 return true 41022 } 41023 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 41024 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 41025 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 41026 for { 41027 _ = v.Args[1] 41028 sh := v.Args[0] 41029 if sh.Op != OpAMD64SHLQconst { 41030 break 41031 } 41032 if sh.AuxInt != 8 { 41033 break 41034 } 41035 x0 := sh.Args[0] 41036 if x0.Op != OpAMD64MOVBload { 41037 break 41038 } 41039 i0 := x0.AuxInt 41040 s := x0.Aux 41041 _ = x0.Args[1] 41042 p := x0.Args[0] 41043 mem := x0.Args[1] 41044 x1 := v.Args[1] 41045 if x1.Op != OpAMD64MOVBload { 41046 break 41047 } 41048 i1 := x1.AuxInt 41049 if x1.Aux != s { 41050 break 41051 } 41052 _ = x1.Args[1] 41053 if p != x1.Args[0] { 41054 break 41055 } 41056 if mem != x1.Args[1] { 41057 break 41058 } 41059 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 41060 break 41061 } 41062 b = mergePoint(b, x0, x1) 41063 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 41064 v.reset(OpCopy) 41065 v.AddArg(v0) 41066 v0.AuxInt = 8 41067 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 41068 v1.AuxInt = i0 41069 v1.Aux = s 41070 v1.AddArg(p) 41071 v1.AddArg(mem) 41072 v0.AddArg(v1) 41073 return true 41074 } 41075 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 41076 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 41077 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 41078 for { 41079 _ = v.Args[1] 41080 r1 := v.Args[0] 41081 if r1.Op != OpAMD64ROLWconst { 41082 break 41083 } 41084 if r1.AuxInt != 8 { 41085 break 41086 } 41087 x1 := r1.Args[0] 41088 if x1.Op != OpAMD64MOVWload { 41089 break 41090 } 41091 i1 := x1.AuxInt 41092 s := x1.Aux 41093 _ = x1.Args[1] 41094 p := x1.Args[0] 41095 mem := x1.Args[1] 41096 sh := v.Args[1] 41097 if sh.Op != OpAMD64SHLQconst { 41098 break 41099 } 41100 if sh.AuxInt != 16 { 41101 break 41102 } 41103 r0 := sh.Args[0] 41104 if r0.Op != OpAMD64ROLWconst { 41105 break 41106 } 41107 if r0.AuxInt != 8 { 41108 break 41109 } 41110 x0 := r0.Args[0] 41111 if x0.Op != OpAMD64MOVWload { 41112 break 41113 } 41114 i0 := x0.AuxInt 41115 if x0.Aux != s { 41116 break 41117 } 41118 _ = x0.Args[1] 41119 if p != x0.Args[0] { 41120 break 41121 } 41122 if mem != x0.Args[1] { 41123 break 41124 } 41125 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 41126 break 41127 } 41128 b = mergePoint(b, x0, x1) 41129 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 41130 v.reset(OpCopy) 41131 v.AddArg(v0) 41132 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 41133 v1.AuxInt = i0 41134 v1.Aux = s 41135 v1.AddArg(p) 
41136 v1.AddArg(mem) 41137 v0.AddArg(v1) 41138 return true 41139 } 41140 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 41141 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 41142 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 41143 for { 41144 _ = v.Args[1] 41145 sh := v.Args[0] 41146 if sh.Op != OpAMD64SHLQconst { 41147 break 41148 } 41149 if sh.AuxInt != 16 { 41150 break 41151 } 41152 r0 := sh.Args[0] 41153 if r0.Op != OpAMD64ROLWconst { 41154 break 41155 } 41156 if r0.AuxInt != 8 { 41157 break 41158 } 41159 x0 := r0.Args[0] 41160 if x0.Op != OpAMD64MOVWload { 41161 break 41162 } 41163 i0 := x0.AuxInt 41164 s := x0.Aux 41165 _ = x0.Args[1] 41166 p := x0.Args[0] 41167 mem := x0.Args[1] 41168 r1 := v.Args[1] 41169 if r1.Op != OpAMD64ROLWconst { 41170 break 41171 } 41172 if r1.AuxInt != 8 { 41173 break 41174 } 41175 x1 := r1.Args[0] 41176 if x1.Op != OpAMD64MOVWload { 41177 break 41178 } 41179 i1 := x1.AuxInt 41180 if x1.Aux != s { 41181 break 41182 } 41183 _ = x1.Args[1] 41184 if p != x1.Args[0] { 41185 break 41186 } 41187 if mem != x1.Args[1] { 41188 break 41189 } 41190 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 41191 break 41192 } 41193 b = mergePoint(b, x0, x1) 41194 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 41195 v.reset(OpCopy) 41196 v.AddArg(v0) 41197 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 41198 v1.AuxInt = i0 41199 v1.Aux = s 41200 v1.AddArg(p) 41201 v1.AddArg(mem) 41202 v0.AddArg(v1) 41203 return true 41204 } 41205 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 41206 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 41207 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 41208 for { 41209 _ = v.Args[1] 41210 r1 := v.Args[0] 41211 if r1.Op != OpAMD64BSWAPL { 41212 break 41213 } 41214 x1 := r1.Args[0] 41215 if x1.Op != OpAMD64MOVLload { 41216 break 41217 } 41218 i1 := x1.AuxInt 41219 s := x1.Aux 41220 _ = x1.Args[1] 41221 p := x1.Args[0] 41222 mem := x1.Args[1] 41223 sh := v.Args[1] 41224 if sh.Op != OpAMD64SHLQconst { 41225 break 41226 } 41227 if sh.AuxInt != 32 { 41228 break 41229 } 41230 r0 := sh.Args[0] 41231 if r0.Op != OpAMD64BSWAPL { 41232 break 41233 } 41234 x0 := r0.Args[0] 41235 if x0.Op != OpAMD64MOVLload { 41236 break 41237 } 41238 i0 := x0.AuxInt 41239 if x0.Aux != s { 41240 break 41241 } 41242 _ = x0.Args[1] 41243 if p != x0.Args[0] { 41244 break 41245 } 41246 if mem != x0.Args[1] { 41247 break 41248 } 41249 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 41250 break 41251 } 41252 b = mergePoint(b, x0, x1) 41253 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 41254 v.reset(OpCopy) 41255 v.AddArg(v0) 41256 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 41257 v1.AuxInt = i0 41258 v1.Aux = s 41259 v1.AddArg(p) 41260 
v1.AddArg(mem) 41261 v0.AddArg(v1) 41262 return true 41263 } 41264 return false 41265 } 41266 func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { 41267 b := v.Block 41268 _ = b 41269 typ := &b.Func.Config.Types 41270 _ = typ 41271 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 41272 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 41273 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 41274 for { 41275 _ = v.Args[1] 41276 sh := v.Args[0] 41277 if sh.Op != OpAMD64SHLQconst { 41278 break 41279 } 41280 if sh.AuxInt != 32 { 41281 break 41282 } 41283 r0 := sh.Args[0] 41284 if r0.Op != OpAMD64BSWAPL { 41285 break 41286 } 41287 x0 := r0.Args[0] 41288 if x0.Op != OpAMD64MOVLload { 41289 break 41290 } 41291 i0 := x0.AuxInt 41292 s := x0.Aux 41293 _ = x0.Args[1] 41294 p := x0.Args[0] 41295 mem := x0.Args[1] 41296 r1 := v.Args[1] 41297 if r1.Op != OpAMD64BSWAPL { 41298 break 41299 } 41300 x1 := r1.Args[0] 41301 if x1.Op != OpAMD64MOVLload { 41302 break 41303 } 41304 i1 := x1.AuxInt 41305 if x1.Aux != s { 41306 break 41307 } 41308 _ = x1.Args[1] 41309 if p != x1.Args[0] { 41310 break 41311 } 41312 if mem != x1.Args[1] { 41313 break 41314 } 41315 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 41316 break 41317 } 41318 b = mergePoint(b, x0, x1) 41319 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 41320 v.reset(OpCopy) 41321 v.AddArg(v0) 41322 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 41323 v1.AuxInt = i0 41324 v1.Aux = s 41325 v1.AddArg(p) 41326 v1.AddArg(mem) 41327 v0.AddArg(v1) 41328 return true 41329 } 41330 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 41331 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 41332 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 41333 for { 41334 _ = v.Args[1] 41335 s0 := v.Args[0] 41336 if s0.Op != OpAMD64SHLQconst { 41337 break 41338 } 41339 j0 := s0.AuxInt 41340 x0 := s0.Args[0] 41341 if x0.Op != OpAMD64MOVBload { 41342 break 41343 } 41344 i0 := x0.AuxInt 41345 s := x0.Aux 41346 _ = x0.Args[1] 41347 p := x0.Args[0] 41348 mem := x0.Args[1] 41349 or := v.Args[1] 41350 if or.Op != OpAMD64ORQ { 41351 break 41352 } 41353 _ = or.Args[1] 41354 s1 := or.Args[0] 41355 if s1.Op != OpAMD64SHLQconst { 41356 break 41357 } 41358 j1 := s1.AuxInt 41359 x1 := s1.Args[0] 41360 if x1.Op != OpAMD64MOVBload { 41361 break 41362 } 41363 i1 := x1.AuxInt 41364 if x1.Aux != s { 41365 break 41366 } 41367 _ = x1.Args[1] 41368 if p != x1.Args[0] { 41369 break 41370 } 41371 if mem != x1.Args[1] { 41372 break 41373 } 41374 y := or.Args[1] 41375 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 41376 break 41377 } 41378 b = mergePoint(b, x0, x1) 41379 v0 
:= b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41380 v.reset(OpCopy) 41381 v.AddArg(v0) 41382 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 41383 v1.AuxInt = j1 41384 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 41385 v2.AuxInt = 8 41386 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 41387 v3.AuxInt = i0 41388 v3.Aux = s 41389 v3.AddArg(p) 41390 v3.AddArg(mem) 41391 v2.AddArg(v3) 41392 v1.AddArg(v2) 41393 v0.AddArg(v1) 41394 v0.AddArg(y) 41395 return true 41396 } 41397 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 41398 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 41399 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 41400 for { 41401 _ = v.Args[1] 41402 s0 := v.Args[0] 41403 if s0.Op != OpAMD64SHLQconst { 41404 break 41405 } 41406 j0 := s0.AuxInt 41407 x0 := s0.Args[0] 41408 if x0.Op != OpAMD64MOVBload { 41409 break 41410 } 41411 i0 := x0.AuxInt 41412 s := x0.Aux 41413 _ = x0.Args[1] 41414 p := x0.Args[0] 41415 mem := x0.Args[1] 41416 or := v.Args[1] 41417 if or.Op != OpAMD64ORQ { 41418 break 41419 } 41420 _ = or.Args[1] 41421 y := or.Args[0] 41422 s1 := or.Args[1] 41423 if s1.Op != OpAMD64SHLQconst { 41424 break 41425 } 41426 j1 := s1.AuxInt 41427 x1 := s1.Args[0] 41428 if x1.Op != OpAMD64MOVBload { 41429 break 41430 } 41431 i1 := x1.AuxInt 41432 if x1.Aux != s { 41433 break 41434 } 41435 _ = x1.Args[1] 41436 if p != x1.Args[0] { 41437 break 41438 } 41439 if mem != x1.Args[1] { 41440 break 41441 } 41442 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 41443 break 41444 } 41445 b = mergePoint(b, x0, x1) 41446 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41447 v.reset(OpCopy) 41448 v.AddArg(v0) 41449 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 41450 v1.AuxInt = j1 41451 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 41452 v2.AuxInt = 8 41453 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 41454 v3.AuxInt = i0 41455 v3.Aux = s 41456 v3.AddArg(p) 41457 v3.AddArg(mem) 41458 v2.AddArg(v3) 41459 v1.AddArg(v2) 41460 v0.AddArg(v1) 41461 v0.AddArg(y) 41462 return true 41463 } 41464 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 41465 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 41466 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 41467 for { 41468 _ = v.Args[1] 41469 or := v.Args[0] 41470 if or.Op != OpAMD64ORQ { 41471 break 41472 } 41473 _ = or.Args[1] 41474 s1 := or.Args[0] 41475 if s1.Op != OpAMD64SHLQconst { 41476 break 41477 } 41478 j1 := s1.AuxInt 41479 x1 := s1.Args[0] 41480 if x1.Op != OpAMD64MOVBload { 41481 break 41482 } 41483 i1 := x1.AuxInt 41484 s := x1.Aux 41485 _ = x1.Args[1] 41486 p := x1.Args[0] 41487 mem := x1.Args[1] 41488 y := or.Args[1] 41489 s0 := v.Args[1] 41490 if 
s0.Op != OpAMD64SHLQconst { 41491 break 41492 } 41493 j0 := s0.AuxInt 41494 x0 := s0.Args[0] 41495 if x0.Op != OpAMD64MOVBload { 41496 break 41497 } 41498 i0 := x0.AuxInt 41499 if x0.Aux != s { 41500 break 41501 } 41502 _ = x0.Args[1] 41503 if p != x0.Args[0] { 41504 break 41505 } 41506 if mem != x0.Args[1] { 41507 break 41508 } 41509 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 41510 break 41511 } 41512 b = mergePoint(b, x0, x1) 41513 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41514 v.reset(OpCopy) 41515 v.AddArg(v0) 41516 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 41517 v1.AuxInt = j1 41518 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 41519 v2.AuxInt = 8 41520 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 41521 v3.AuxInt = i0 41522 v3.Aux = s 41523 v3.AddArg(p) 41524 v3.AddArg(mem) 41525 v2.AddArg(v3) 41526 v1.AddArg(v2) 41527 v0.AddArg(v1) 41528 v0.AddArg(y) 41529 return true 41530 } 41531 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 41532 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 41533 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 41534 for { 41535 _ = v.Args[1] 41536 or := v.Args[0] 41537 if or.Op != OpAMD64ORQ { 41538 break 41539 } 41540 _ = or.Args[1] 41541 y := or.Args[0] 41542 s1 := or.Args[1] 41543 if s1.Op != OpAMD64SHLQconst { 41544 break 41545 } 41546 j1 := s1.AuxInt 41547 x1 := s1.Args[0] 41548 if x1.Op != OpAMD64MOVBload { 41549 break 41550 } 41551 i1 := x1.AuxInt 41552 s := x1.Aux 41553 _ = x1.Args[1] 41554 p := x1.Args[0] 41555 mem := x1.Args[1] 41556 s0 := v.Args[1] 41557 if s0.Op != OpAMD64SHLQconst { 41558 break 41559 } 41560 j0 := s0.AuxInt 41561 x0 := s0.Args[0] 41562 if x0.Op != OpAMD64MOVBload { 41563 break 41564 } 41565 i0 := x0.AuxInt 41566 if x0.Aux != s { 41567 break 41568 } 41569 _ = x0.Args[1] 41570 if p != x0.Args[0] { 41571 break 41572 } 41573 if mem != x0.Args[1] { 41574 break 41575 } 41576 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 41577 break 41578 } 41579 b = mergePoint(b, x0, x1) 41580 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41581 v.reset(OpCopy) 41582 v.AddArg(v0) 41583 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 41584 v1.AuxInt = j1 41585 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 41586 v2.AuxInt = 8 41587 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 41588 v3.AuxInt = i0 41589 v3.Aux = s 41590 v3.AddArg(p) 41591 v3.AddArg(mem) 41592 v2.AddArg(v3) 41593 v1.AddArg(v2) 41594 v0.AddArg(v1) 41595 v0.AddArg(y) 41596 return true 41597 } 41598 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 41599 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 41600 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 41601 for { 41602 _ = v.Args[1] 41603 s0 := v.Args[0] 41604 if s0.Op != OpAMD64SHLQconst { 41605 break 41606 } 41607 j0 := s0.AuxInt 41608 r0 := s0.Args[0] 41609 if r0.Op != OpAMD64ROLWconst { 41610 break 41611 } 41612 if r0.AuxInt != 8 { 41613 break 41614 } 41615 x0 := r0.Args[0] 41616 if x0.Op != OpAMD64MOVWload { 41617 break 41618 } 41619 i0 := x0.AuxInt 41620 s := x0.Aux 41621 _ = x0.Args[1] 41622 p := x0.Args[0] 41623 mem := x0.Args[1] 41624 or := v.Args[1] 41625 if or.Op != OpAMD64ORQ { 41626 break 41627 } 41628 _ = or.Args[1] 41629 s1 := or.Args[0] 41630 if s1.Op != OpAMD64SHLQconst { 41631 break 41632 } 41633 j1 := s1.AuxInt 41634 r1 := s1.Args[0] 41635 if r1.Op != OpAMD64ROLWconst { 41636 break 41637 } 41638 if r1.AuxInt != 8 { 41639 break 41640 } 41641 x1 := r1.Args[0] 41642 if x1.Op != OpAMD64MOVWload { 41643 break 41644 } 41645 i1 := x1.AuxInt 41646 if x1.Aux != s { 41647 break 41648 } 41649 _ = x1.Args[1] 41650 if p != x1.Args[0] { 41651 break 41652 } 41653 if mem != x1.Args[1] { 41654 break 41655 } 41656 y := or.Args[1] 41657 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 41658 break 41659 } 41660 b = mergePoint(b, x0, x1) 41661 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41662 v.reset(OpCopy) 41663 v.AddArg(v0) 41664 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 41665 v1.AuxInt = j1 41666 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 41667 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 41668 v3.AuxInt = i0 41669 v3.Aux = s 41670 v3.AddArg(p) 41671 v3.AddArg(mem) 41672 v2.AddArg(v3) 41673 v1.AddArg(v2) 41674 v0.AddArg(v1) 41675 v0.AddArg(y) 41676 return true 41677 } 41678 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 41679 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 41680 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 41681 for { 41682 _ = v.Args[1] 41683 s0 := v.Args[0] 41684 if s0.Op != OpAMD64SHLQconst { 41685 break 41686 } 41687 j0 := s0.AuxInt 41688 r0 := s0.Args[0] 41689 if r0.Op != OpAMD64ROLWconst { 41690 break 41691 } 41692 if r0.AuxInt != 8 { 41693 break 41694 } 41695 x0 := r0.Args[0] 41696 if x0.Op != OpAMD64MOVWload { 41697 break 41698 } 41699 i0 := x0.AuxInt 41700 s := x0.Aux 41701 _ = x0.Args[1] 41702 p := x0.Args[0] 41703 mem := x0.Args[1] 41704 or := v.Args[1] 41705 if or.Op != OpAMD64ORQ { 41706 break 41707 } 41708 _ = or.Args[1] 41709 y := or.Args[0] 41710 s1 := or.Args[1] 41711 if s1.Op != OpAMD64SHLQconst { 41712 break 41713 } 41714 j1 := s1.AuxInt 41715 r1 := s1.Args[0] 41716 if r1.Op != OpAMD64ROLWconst { 41717 break 41718 } 41719 if r1.AuxInt != 8 { 41720 break 41721 } 41722 x1 := r1.Args[0] 
41723 if x1.Op != OpAMD64MOVWload { 41724 break 41725 } 41726 i1 := x1.AuxInt 41727 if x1.Aux != s { 41728 break 41729 } 41730 _ = x1.Args[1] 41731 if p != x1.Args[0] { 41732 break 41733 } 41734 if mem != x1.Args[1] { 41735 break 41736 } 41737 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 41738 break 41739 } 41740 b = mergePoint(b, x0, x1) 41741 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41742 v.reset(OpCopy) 41743 v.AddArg(v0) 41744 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 41745 v1.AuxInt = j1 41746 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 41747 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 41748 v3.AuxInt = i0 41749 v3.Aux = s 41750 v3.AddArg(p) 41751 v3.AddArg(mem) 41752 v2.AddArg(v3) 41753 v1.AddArg(v2) 41754 v0.AddArg(v1) 41755 v0.AddArg(y) 41756 return true 41757 } 41758 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 41759 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 41760 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 41761 for { 41762 _ = v.Args[1] 41763 or := v.Args[0] 41764 if or.Op != OpAMD64ORQ { 41765 break 41766 } 41767 _ = or.Args[1] 41768 s1 := or.Args[0] 41769 if s1.Op != OpAMD64SHLQconst { 41770 break 41771 } 41772 j1 := s1.AuxInt 41773 r1 := s1.Args[0] 41774 if r1.Op != OpAMD64ROLWconst { 41775 break 41776 } 41777 if r1.AuxInt != 8 { 41778 break 41779 } 41780 x1 := r1.Args[0] 41781 if x1.Op != OpAMD64MOVWload { 41782 break 41783 } 41784 i1 := x1.AuxInt 41785 s := x1.Aux 41786 _ = x1.Args[1] 41787 p := x1.Args[0] 41788 mem := x1.Args[1] 41789 y := or.Args[1] 41790 s0 := v.Args[1] 41791 if s0.Op != OpAMD64SHLQconst { 41792 break 41793 } 41794 j0 := s0.AuxInt 41795 r0 := s0.Args[0] 41796 if r0.Op != OpAMD64ROLWconst { 41797 break 41798 } 41799 if r0.AuxInt != 8 { 41800 break 41801 } 41802 x0 := r0.Args[0] 41803 if x0.Op != OpAMD64MOVWload { 41804 break 41805 } 41806 i0 := x0.AuxInt 41807 if x0.Aux != s { 41808 break 41809 } 41810 _ = x0.Args[1] 41811 if p != x0.Args[0] { 41812 break 41813 } 41814 if mem != x0.Args[1] { 41815 break 41816 } 41817 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 41818 break 41819 } 41820 b = mergePoint(b, x0, x1) 41821 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41822 v.reset(OpCopy) 41823 v.AddArg(v0) 41824 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 41825 v1.AuxInt = j1 41826 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 41827 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 41828 v3.AuxInt = i0 41829 v3.Aux = s 41830 v3.AddArg(p) 41831 v3.AddArg(mem) 41832 v2.AddArg(v3) 41833 v1.AddArg(v2) 41834 v0.AddArg(v1) 41835 v0.AddArg(y) 41836 return true 41837 } 41838 // 
match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 41839 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 41840 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 41841 for { 41842 _ = v.Args[1] 41843 or := v.Args[0] 41844 if or.Op != OpAMD64ORQ { 41845 break 41846 } 41847 _ = or.Args[1] 41848 y := or.Args[0] 41849 s1 := or.Args[1] 41850 if s1.Op != OpAMD64SHLQconst { 41851 break 41852 } 41853 j1 := s1.AuxInt 41854 r1 := s1.Args[0] 41855 if r1.Op != OpAMD64ROLWconst { 41856 break 41857 } 41858 if r1.AuxInt != 8 { 41859 break 41860 } 41861 x1 := r1.Args[0] 41862 if x1.Op != OpAMD64MOVWload { 41863 break 41864 } 41865 i1 := x1.AuxInt 41866 s := x1.Aux 41867 _ = x1.Args[1] 41868 p := x1.Args[0] 41869 mem := x1.Args[1] 41870 s0 := v.Args[1] 41871 if s0.Op != OpAMD64SHLQconst { 41872 break 41873 } 41874 j0 := s0.AuxInt 41875 r0 := s0.Args[0] 41876 if r0.Op != OpAMD64ROLWconst { 41877 break 41878 } 41879 if r0.AuxInt != 8 { 41880 break 41881 } 41882 x0 := r0.Args[0] 41883 if x0.Op != OpAMD64MOVWload { 41884 break 41885 } 41886 i0 := x0.AuxInt 41887 if x0.Aux != s { 41888 break 41889 } 41890 _ = x0.Args[1] 41891 if p != x0.Args[0] { 41892 break 41893 } 41894 if mem != x0.Args[1] { 41895 break 41896 } 41897 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 41898 break 41899 } 41900 b = mergePoint(b, x0, x1) 41901 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 41902 v.reset(OpCopy) 41903 v.AddArg(v0) 41904 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 41905 v1.AuxInt = j1 41906 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 41907 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 41908 v3.AuxInt = i0 41909 v3.Aux = s 41910 v3.AddArg(p) 41911 v3.AddArg(mem) 41912 v2.AddArg(v3) 41913 v1.AddArg(v2) 41914 v0.AddArg(v1) 41915 v0.AddArg(y) 41916 return true 41917 } 41918 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 41919 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 41920 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 41921 for { 41922 _ = v.Args[1] 41923 x1 := v.Args[0] 41924 if x1.Op != OpAMD64MOVBloadidx1 { 41925 break 41926 } 41927 i1 := x1.AuxInt 41928 s := x1.Aux 41929 _ = x1.Args[2] 41930 p := x1.Args[0] 41931 idx := x1.Args[1] 41932 mem := x1.Args[2] 41933 sh := v.Args[1] 41934 if sh.Op != OpAMD64SHLQconst { 41935 break 41936 } 41937 if sh.AuxInt != 8 { 41938 break 41939 } 41940 x0 := sh.Args[0] 41941 if x0.Op != OpAMD64MOVBloadidx1 { 41942 break 41943 } 41944 i0 := x0.AuxInt 41945 if x0.Aux != s { 41946 break 41947 } 41948 _ = x0.Args[2] 41949 if p != x0.Args[0] { 41950 break 41951 } 41952 if idx != x0.Args[1] { 41953 break 41954 } 41955 if mem != x0.Args[2] { 41956 break 41957 } 41958 if !(i1 == i0+1 && x0.Uses == 1 && 
x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 41959 break 41960 } 41961 b = mergePoint(b, x0, x1) 41962 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 41963 v.reset(OpCopy) 41964 v.AddArg(v0) 41965 v0.AuxInt = 8 41966 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 41967 v1.AuxInt = i0 41968 v1.Aux = s 41969 v1.AddArg(p) 41970 v1.AddArg(idx) 41971 v1.AddArg(mem) 41972 v0.AddArg(v1) 41973 return true 41974 } 41975 return false 41976 } 41977 func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool { 41978 b := v.Block 41979 _ = b 41980 typ := &b.Func.Config.Types 41981 _ = typ 41982 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 41983 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 41984 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 41985 for { 41986 _ = v.Args[1] 41987 x1 := v.Args[0] 41988 if x1.Op != OpAMD64MOVBloadidx1 { 41989 break 41990 } 41991 i1 := x1.AuxInt 41992 s := x1.Aux 41993 _ = x1.Args[2] 41994 idx := x1.Args[0] 41995 p := x1.Args[1] 41996 mem := x1.Args[2] 41997 sh := v.Args[1] 41998 if sh.Op != OpAMD64SHLQconst { 41999 break 42000 } 42001 if sh.AuxInt != 8 { 42002 break 42003 } 42004 x0 := sh.Args[0] 42005 if x0.Op != OpAMD64MOVBloadidx1 { 42006 break 42007 } 42008 i0 := x0.AuxInt 42009 if x0.Aux != s { 42010 break 42011 } 42012 _ = x0.Args[2] 42013 if p != x0.Args[0] { 42014 break 42015 } 42016 if idx != x0.Args[1] { 42017 break 42018 } 42019 if mem != x0.Args[2] { 42020 break 42021 } 42022 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 42023 break 42024 } 42025 b = mergePoint(b, x0, x1) 42026 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 42027 v.reset(OpCopy) 42028 v.AddArg(v0) 42029 v0.AuxInt = 8 42030 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 42031 v1.AuxInt = i0 42032 v1.Aux = s 42033 v1.AddArg(p) 42034 v1.AddArg(idx) 42035 v1.AddArg(mem) 42036 v0.AddArg(v1) 42037 return true 42038 } 42039 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 42040 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 42041 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 42042 for { 42043 _ = v.Args[1] 42044 x1 := v.Args[0] 42045 if x1.Op != OpAMD64MOVBloadidx1 { 42046 break 42047 } 42048 i1 := x1.AuxInt 42049 s := x1.Aux 42050 _ = x1.Args[2] 42051 p := x1.Args[0] 42052 idx := x1.Args[1] 42053 mem := x1.Args[2] 42054 sh := v.Args[1] 42055 if sh.Op != OpAMD64SHLQconst { 42056 break 42057 } 42058 if sh.AuxInt != 8 { 42059 break 42060 } 42061 x0 := sh.Args[0] 42062 if x0.Op != OpAMD64MOVBloadidx1 { 42063 break 42064 } 42065 i0 := x0.AuxInt 42066 if x0.Aux != s { 42067 break 42068 } 42069 _ = x0.Args[2] 42070 if idx != x0.Args[0] { 42071 break 42072 } 42073 if p != x0.Args[1] { 42074 break 42075 } 42076 if mem != x0.Args[2] { 42077 break 42078 } 42079 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 42080 break 42081 } 42082 b = mergePoint(b, x0, x1) 42083 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 42084 v.reset(OpCopy) 
42085 v.AddArg(v0) 42086 v0.AuxInt = 8 42087 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 42088 v1.AuxInt = i0 42089 v1.Aux = s 42090 v1.AddArg(p) 42091 v1.AddArg(idx) 42092 v1.AddArg(mem) 42093 v0.AddArg(v1) 42094 return true 42095 } 42096 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 42097 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 42098 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 42099 for { 42100 _ = v.Args[1] 42101 x1 := v.Args[0] 42102 if x1.Op != OpAMD64MOVBloadidx1 { 42103 break 42104 } 42105 i1 := x1.AuxInt 42106 s := x1.Aux 42107 _ = x1.Args[2] 42108 idx := x1.Args[0] 42109 p := x1.Args[1] 42110 mem := x1.Args[2] 42111 sh := v.Args[1] 42112 if sh.Op != OpAMD64SHLQconst { 42113 break 42114 } 42115 if sh.AuxInt != 8 { 42116 break 42117 } 42118 x0 := sh.Args[0] 42119 if x0.Op != OpAMD64MOVBloadidx1 { 42120 break 42121 } 42122 i0 := x0.AuxInt 42123 if x0.Aux != s { 42124 break 42125 } 42126 _ = x0.Args[2] 42127 if idx != x0.Args[0] { 42128 break 42129 } 42130 if p != x0.Args[1] { 42131 break 42132 } 42133 if mem != x0.Args[2] { 42134 break 42135 } 42136 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 42137 break 42138 } 42139 b = mergePoint(b, x0, x1) 42140 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 42141 v.reset(OpCopy) 42142 v.AddArg(v0) 42143 v0.AuxInt = 8 42144 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 42145 v1.AuxInt = i0 42146 v1.Aux = s 42147 v1.AddArg(p) 42148 v1.AddArg(idx) 42149 v1.AddArg(mem) 42150 v0.AddArg(v1) 42151 return true 42152 } 42153 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 42154 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 42155 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 42156 for { 42157 _ = v.Args[1] 42158 sh := v.Args[0] 42159 if sh.Op != OpAMD64SHLQconst { 42160 break 42161 } 42162 if sh.AuxInt != 8 { 42163 break 42164 } 42165 x0 := sh.Args[0] 42166 if x0.Op != OpAMD64MOVBloadidx1 { 42167 break 42168 } 42169 i0 := x0.AuxInt 42170 s := x0.Aux 42171 _ = x0.Args[2] 42172 p := x0.Args[0] 42173 idx := x0.Args[1] 42174 mem := x0.Args[2] 42175 x1 := v.Args[1] 42176 if x1.Op != OpAMD64MOVBloadidx1 { 42177 break 42178 } 42179 i1 := x1.AuxInt 42180 if x1.Aux != s { 42181 break 42182 } 42183 _ = x1.Args[2] 42184 if p != x1.Args[0] { 42185 break 42186 } 42187 if idx != x1.Args[1] { 42188 break 42189 } 42190 if mem != x1.Args[2] { 42191 break 42192 } 42193 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 42194 break 42195 } 42196 b = mergePoint(b, x0, x1) 42197 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 42198 v.reset(OpCopy) 42199 v.AddArg(v0) 42200 v0.AuxInt = 8 42201 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 42202 v1.AuxInt = i0 42203 v1.Aux = s 42204 v1.AddArg(p) 42205 v1.AddArg(idx) 42206 v1.AddArg(mem) 42207 v0.AddArg(v1) 42208 return true 42209 } 42210 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 42211 // cond: i1 == i0+1 && x0.Uses == 1 
&& x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 42212 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 42213 for { 42214 _ = v.Args[1] 42215 sh := v.Args[0] 42216 if sh.Op != OpAMD64SHLQconst { 42217 break 42218 } 42219 if sh.AuxInt != 8 { 42220 break 42221 } 42222 x0 := sh.Args[0] 42223 if x0.Op != OpAMD64MOVBloadidx1 { 42224 break 42225 } 42226 i0 := x0.AuxInt 42227 s := x0.Aux 42228 _ = x0.Args[2] 42229 idx := x0.Args[0] 42230 p := x0.Args[1] 42231 mem := x0.Args[2] 42232 x1 := v.Args[1] 42233 if x1.Op != OpAMD64MOVBloadidx1 { 42234 break 42235 } 42236 i1 := x1.AuxInt 42237 if x1.Aux != s { 42238 break 42239 } 42240 _ = x1.Args[2] 42241 if p != x1.Args[0] { 42242 break 42243 } 42244 if idx != x1.Args[1] { 42245 break 42246 } 42247 if mem != x1.Args[2] { 42248 break 42249 } 42250 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 42251 break 42252 } 42253 b = mergePoint(b, x0, x1) 42254 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 42255 v.reset(OpCopy) 42256 v.AddArg(v0) 42257 v0.AuxInt = 8 42258 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 42259 v1.AuxInt = i0 42260 v1.Aux = s 42261 v1.AddArg(p) 42262 v1.AddArg(idx) 42263 v1.AddArg(mem) 42264 v0.AddArg(v1) 42265 return true 42266 } 42267 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 42268 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 42269 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 42270 for { 42271 _ = v.Args[1] 42272 sh := v.Args[0] 42273 if sh.Op != OpAMD64SHLQconst { 42274 break 42275 } 42276 if sh.AuxInt != 8 { 42277 break 42278 } 42279 x0 := sh.Args[0] 42280 if x0.Op != OpAMD64MOVBloadidx1 { 42281 break 42282 } 42283 i0 := x0.AuxInt 42284 s := x0.Aux 42285 _ = x0.Args[2] 42286 p := x0.Args[0] 42287 idx := x0.Args[1] 42288 mem := x0.Args[2] 42289 x1 := v.Args[1] 42290 if x1.Op != OpAMD64MOVBloadidx1 { 42291 break 42292 } 42293 i1 := x1.AuxInt 42294 if x1.Aux != s { 42295 break 42296 } 42297 _ = x1.Args[2] 42298 if idx != x1.Args[0] { 42299 break 42300 } 42301 if p != x1.Args[1] { 42302 break 42303 } 42304 if mem != x1.Args[2] { 42305 break 42306 } 42307 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 42308 break 42309 } 42310 b = mergePoint(b, x0, x1) 42311 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 42312 v.reset(OpCopy) 42313 v.AddArg(v0) 42314 v0.AuxInt = 8 42315 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 42316 v1.AuxInt = i0 42317 v1.Aux = s 42318 v1.AddArg(p) 42319 v1.AddArg(idx) 42320 v1.AddArg(mem) 42321 v0.AddArg(v1) 42322 return true 42323 } 42324 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 42325 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 42326 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 42327 for { 42328 _ = v.Args[1] 42329 sh := v.Args[0] 42330 if sh.Op != OpAMD64SHLQconst { 42331 break 42332 } 42333 if sh.AuxInt != 8 { 42334 break 42335 } 42336 x0 := sh.Args[0] 42337 if x0.Op != 
OpAMD64MOVBloadidx1 { 42338 break 42339 } 42340 i0 := x0.AuxInt 42341 s := x0.Aux 42342 _ = x0.Args[2] 42343 idx := x0.Args[0] 42344 p := x0.Args[1] 42345 mem := x0.Args[2] 42346 x1 := v.Args[1] 42347 if x1.Op != OpAMD64MOVBloadidx1 { 42348 break 42349 } 42350 i1 := x1.AuxInt 42351 if x1.Aux != s { 42352 break 42353 } 42354 _ = x1.Args[2] 42355 if idx != x1.Args[0] { 42356 break 42357 } 42358 if p != x1.Args[1] { 42359 break 42360 } 42361 if mem != x1.Args[2] { 42362 break 42363 } 42364 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 42365 break 42366 } 42367 b = mergePoint(b, x0, x1) 42368 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 42369 v.reset(OpCopy) 42370 v.AddArg(v0) 42371 v0.AuxInt = 8 42372 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 42373 v1.AuxInt = i0 42374 v1.Aux = s 42375 v1.AddArg(p) 42376 v1.AddArg(idx) 42377 v1.AddArg(mem) 42378 v0.AddArg(v1) 42379 return true 42380 } 42381 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 42382 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42383 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 42384 for { 42385 _ = v.Args[1] 42386 r1 := v.Args[0] 42387 if r1.Op != OpAMD64ROLWconst { 42388 break 42389 } 42390 if r1.AuxInt != 8 { 42391 break 42392 } 42393 x1 := r1.Args[0] 42394 if x1.Op != OpAMD64MOVWloadidx1 { 42395 break 42396 } 42397 i1 := x1.AuxInt 42398 s := x1.Aux 42399 _ = x1.Args[2] 42400 p := x1.Args[0] 42401 idx := x1.Args[1] 42402 mem := x1.Args[2] 42403 sh := v.Args[1] 42404 if sh.Op != OpAMD64SHLQconst { 42405 break 42406 } 42407 if sh.AuxInt != 16 { 42408 break 42409 } 42410 r0 := sh.Args[0] 42411 if r0.Op != OpAMD64ROLWconst { 42412 break 42413 } 42414 if r0.AuxInt != 8 { 42415 break 42416 } 42417 x0 := r0.Args[0] 42418 if x0.Op != OpAMD64MOVWloadidx1 { 42419 break 42420 } 42421 i0 := x0.AuxInt 42422 if x0.Aux != s { 42423 break 42424 } 42425 _ = x0.Args[2] 42426 if p != x0.Args[0] { 42427 break 42428 } 42429 if idx != x0.Args[1] { 42430 break 42431 } 42432 if mem != x0.Args[2] { 42433 break 42434 } 42435 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42436 break 42437 } 42438 b = mergePoint(b, x0, x1) 42439 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 42440 v.reset(OpCopy) 42441 v.AddArg(v0) 42442 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 42443 v1.AuxInt = i0 42444 v1.Aux = s 42445 v1.AddArg(p) 42446 v1.AddArg(idx) 42447 v1.AddArg(mem) 42448 v0.AddArg(v1) 42449 return true 42450 } 42451 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 42452 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42453 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 42454 for { 42455 _ = v.Args[1] 42456 r1 := v.Args[0] 42457 if r1.Op != OpAMD64ROLWconst { 42458 break 42459 } 42460 if 
r1.AuxInt != 8 { 42461 break 42462 } 42463 x1 := r1.Args[0] 42464 if x1.Op != OpAMD64MOVWloadidx1 { 42465 break 42466 } 42467 i1 := x1.AuxInt 42468 s := x1.Aux 42469 _ = x1.Args[2] 42470 idx := x1.Args[0] 42471 p := x1.Args[1] 42472 mem := x1.Args[2] 42473 sh := v.Args[1] 42474 if sh.Op != OpAMD64SHLQconst { 42475 break 42476 } 42477 if sh.AuxInt != 16 { 42478 break 42479 } 42480 r0 := sh.Args[0] 42481 if r0.Op != OpAMD64ROLWconst { 42482 break 42483 } 42484 if r0.AuxInt != 8 { 42485 break 42486 } 42487 x0 := r0.Args[0] 42488 if x0.Op != OpAMD64MOVWloadidx1 { 42489 break 42490 } 42491 i0 := x0.AuxInt 42492 if x0.Aux != s { 42493 break 42494 } 42495 _ = x0.Args[2] 42496 if p != x0.Args[0] { 42497 break 42498 } 42499 if idx != x0.Args[1] { 42500 break 42501 } 42502 if mem != x0.Args[2] { 42503 break 42504 } 42505 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42506 break 42507 } 42508 b = mergePoint(b, x0, x1) 42509 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 42510 v.reset(OpCopy) 42511 v.AddArg(v0) 42512 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 42513 v1.AuxInt = i0 42514 v1.Aux = s 42515 v1.AddArg(p) 42516 v1.AddArg(idx) 42517 v1.AddArg(mem) 42518 v0.AddArg(v1) 42519 return true 42520 } 42521 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 42522 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42523 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 42524 for { 42525 _ = v.Args[1] 42526 r1 := v.Args[0] 42527 if r1.Op != OpAMD64ROLWconst { 42528 break 42529 } 42530 if r1.AuxInt != 8 { 42531 break 42532 } 42533 x1 := r1.Args[0] 42534 if x1.Op != OpAMD64MOVWloadidx1 { 42535 break 42536 } 42537 i1 := x1.AuxInt 42538 s := x1.Aux 42539 _ = x1.Args[2] 42540 p := x1.Args[0] 42541 idx := x1.Args[1] 42542 mem := x1.Args[2] 42543 sh := v.Args[1] 42544 if sh.Op != OpAMD64SHLQconst { 42545 break 42546 } 42547 if sh.AuxInt != 16 { 42548 break 42549 } 42550 r0 := sh.Args[0] 42551 if r0.Op != OpAMD64ROLWconst { 42552 break 42553 } 42554 if r0.AuxInt != 8 { 42555 break 42556 } 42557 x0 := r0.Args[0] 42558 if x0.Op != OpAMD64MOVWloadidx1 { 42559 break 42560 } 42561 i0 := x0.AuxInt 42562 if x0.Aux != s { 42563 break 42564 } 42565 _ = x0.Args[2] 42566 if idx != x0.Args[0] { 42567 break 42568 } 42569 if p != x0.Args[1] { 42570 break 42571 } 42572 if mem != x0.Args[2] { 42573 break 42574 } 42575 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42576 break 42577 } 42578 b = mergePoint(b, x0, x1) 42579 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 42580 v.reset(OpCopy) 42581 v.AddArg(v0) 42582 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 42583 v1.AuxInt = i0 42584 v1.Aux = s 42585 v1.AddArg(p) 42586 v1.AddArg(idx) 42587 v1.AddArg(mem) 42588 v0.AddArg(v1) 42589 return true 42590 } 42591 return false 42592 } 42593 func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool { 42594 b := v.Block 42595 _ = b 42596 typ := &b.Func.Config.Types 42597 _ = typ 42598 // match: (ORQ r1:(ROLWconst [8] 
x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 42599 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42600 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 42601 for { 42602 _ = v.Args[1] 42603 r1 := v.Args[0] 42604 if r1.Op != OpAMD64ROLWconst { 42605 break 42606 } 42607 if r1.AuxInt != 8 { 42608 break 42609 } 42610 x1 := r1.Args[0] 42611 if x1.Op != OpAMD64MOVWloadidx1 { 42612 break 42613 } 42614 i1 := x1.AuxInt 42615 s := x1.Aux 42616 _ = x1.Args[2] 42617 idx := x1.Args[0] 42618 p := x1.Args[1] 42619 mem := x1.Args[2] 42620 sh := v.Args[1] 42621 if sh.Op != OpAMD64SHLQconst { 42622 break 42623 } 42624 if sh.AuxInt != 16 { 42625 break 42626 } 42627 r0 := sh.Args[0] 42628 if r0.Op != OpAMD64ROLWconst { 42629 break 42630 } 42631 if r0.AuxInt != 8 { 42632 break 42633 } 42634 x0 := r0.Args[0] 42635 if x0.Op != OpAMD64MOVWloadidx1 { 42636 break 42637 } 42638 i0 := x0.AuxInt 42639 if x0.Aux != s { 42640 break 42641 } 42642 _ = x0.Args[2] 42643 if idx != x0.Args[0] { 42644 break 42645 } 42646 if p != x0.Args[1] { 42647 break 42648 } 42649 if mem != x0.Args[2] { 42650 break 42651 } 42652 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42653 break 42654 } 42655 b = mergePoint(b, x0, x1) 42656 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 42657 v.reset(OpCopy) 42658 v.AddArg(v0) 42659 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 42660 v1.AuxInt = i0 42661 v1.Aux = s 42662 v1.AddArg(p) 42663 v1.AddArg(idx) 42664 v1.AddArg(mem) 42665 v0.AddArg(v1) 42666 return true 42667 } 42668 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 42669 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42670 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 42671 for { 42672 _ = v.Args[1] 42673 sh := v.Args[0] 42674 if sh.Op != OpAMD64SHLQconst { 42675 break 42676 } 42677 if sh.AuxInt != 16 { 42678 break 42679 } 42680 r0 := sh.Args[0] 42681 if r0.Op != OpAMD64ROLWconst { 42682 break 42683 } 42684 if r0.AuxInt != 8 { 42685 break 42686 } 42687 x0 := r0.Args[0] 42688 if x0.Op != OpAMD64MOVWloadidx1 { 42689 break 42690 } 42691 i0 := x0.AuxInt 42692 s := x0.Aux 42693 _ = x0.Args[2] 42694 p := x0.Args[0] 42695 idx := x0.Args[1] 42696 mem := x0.Args[2] 42697 r1 := v.Args[1] 42698 if r1.Op != OpAMD64ROLWconst { 42699 break 42700 } 42701 if r1.AuxInt != 8 { 42702 break 42703 } 42704 x1 := r1.Args[0] 42705 if x1.Op != OpAMD64MOVWloadidx1 { 42706 break 42707 } 42708 i1 := x1.AuxInt 42709 if x1.Aux != s { 42710 break 42711 } 42712 _ = x1.Args[2] 42713 if p != x1.Args[0] { 42714 break 42715 } 42716 if idx != x1.Args[1] { 42717 break 42718 } 42719 if mem != x1.Args[2] { 42720 break 42721 } 42722 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42723 break 42724 } 42725 b = 
mergePoint(b, x0, x1) 42726 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 42727 v.reset(OpCopy) 42728 v.AddArg(v0) 42729 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 42730 v1.AuxInt = i0 42731 v1.Aux = s 42732 v1.AddArg(p) 42733 v1.AddArg(idx) 42734 v1.AddArg(mem) 42735 v0.AddArg(v1) 42736 return true 42737 } 42738 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 42739 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42740 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 42741 for { 42742 _ = v.Args[1] 42743 sh := v.Args[0] 42744 if sh.Op != OpAMD64SHLQconst { 42745 break 42746 } 42747 if sh.AuxInt != 16 { 42748 break 42749 } 42750 r0 := sh.Args[0] 42751 if r0.Op != OpAMD64ROLWconst { 42752 break 42753 } 42754 if r0.AuxInt != 8 { 42755 break 42756 } 42757 x0 := r0.Args[0] 42758 if x0.Op != OpAMD64MOVWloadidx1 { 42759 break 42760 } 42761 i0 := x0.AuxInt 42762 s := x0.Aux 42763 _ = x0.Args[2] 42764 idx := x0.Args[0] 42765 p := x0.Args[1] 42766 mem := x0.Args[2] 42767 r1 := v.Args[1] 42768 if r1.Op != OpAMD64ROLWconst { 42769 break 42770 } 42771 if r1.AuxInt != 8 { 42772 break 42773 } 42774 x1 := r1.Args[0] 42775 if x1.Op != OpAMD64MOVWloadidx1 { 42776 break 42777 } 42778 i1 := x1.AuxInt 42779 if x1.Aux != s { 42780 break 42781 } 42782 _ = x1.Args[2] 42783 if p != x1.Args[0] { 42784 break 42785 } 42786 if idx != x1.Args[1] { 42787 break 42788 } 42789 if mem != x1.Args[2] { 42790 break 42791 } 42792 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42793 break 42794 } 42795 b = mergePoint(b, x0, x1) 42796 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 42797 v.reset(OpCopy) 42798 v.AddArg(v0) 42799 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 42800 v1.AuxInt = i0 42801 v1.Aux = s 42802 v1.AddArg(p) 42803 v1.AddArg(idx) 42804 v1.AddArg(mem) 42805 v0.AddArg(v1) 42806 return true 42807 } 42808 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 42809 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42810 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 42811 for { 42812 _ = v.Args[1] 42813 sh := v.Args[0] 42814 if sh.Op != OpAMD64SHLQconst { 42815 break 42816 } 42817 if sh.AuxInt != 16 { 42818 break 42819 } 42820 r0 := sh.Args[0] 42821 if r0.Op != OpAMD64ROLWconst { 42822 break 42823 } 42824 if r0.AuxInt != 8 { 42825 break 42826 } 42827 x0 := r0.Args[0] 42828 if x0.Op != OpAMD64MOVWloadidx1 { 42829 break 42830 } 42831 i0 := x0.AuxInt 42832 s := x0.Aux 42833 _ = x0.Args[2] 42834 p := x0.Args[0] 42835 idx := x0.Args[1] 42836 mem := x0.Args[2] 42837 r1 := v.Args[1] 42838 if r1.Op != OpAMD64ROLWconst { 42839 break 42840 } 42841 if r1.AuxInt != 8 { 42842 break 42843 } 42844 x1 := r1.Args[0] 42845 if x1.Op != OpAMD64MOVWloadidx1 { 42846 break 42847 } 42848 i1 := x1.AuxInt 42849 if x1.Aux != s { 42850 break 42851 } 42852 _ = x1.Args[2] 42853 if idx != 
x1.Args[0] { 42854 break 42855 } 42856 if p != x1.Args[1] { 42857 break 42858 } 42859 if mem != x1.Args[2] { 42860 break 42861 } 42862 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42863 break 42864 } 42865 b = mergePoint(b, x0, x1) 42866 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 42867 v.reset(OpCopy) 42868 v.AddArg(v0) 42869 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 42870 v1.AuxInt = i0 42871 v1.Aux = s 42872 v1.AddArg(p) 42873 v1.AddArg(idx) 42874 v1.AddArg(mem) 42875 v0.AddArg(v1) 42876 return true 42877 } 42878 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 42879 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42880 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 42881 for { 42882 _ = v.Args[1] 42883 sh := v.Args[0] 42884 if sh.Op != OpAMD64SHLQconst { 42885 break 42886 } 42887 if sh.AuxInt != 16 { 42888 break 42889 } 42890 r0 := sh.Args[0] 42891 if r0.Op != OpAMD64ROLWconst { 42892 break 42893 } 42894 if r0.AuxInt != 8 { 42895 break 42896 } 42897 x0 := r0.Args[0] 42898 if x0.Op != OpAMD64MOVWloadidx1 { 42899 break 42900 } 42901 i0 := x0.AuxInt 42902 s := x0.Aux 42903 _ = x0.Args[2] 42904 idx := x0.Args[0] 42905 p := x0.Args[1] 42906 mem := x0.Args[2] 42907 r1 := v.Args[1] 42908 if r1.Op != OpAMD64ROLWconst { 42909 break 42910 } 42911 if r1.AuxInt != 8 { 42912 break 42913 } 42914 x1 := r1.Args[0] 42915 if x1.Op != OpAMD64MOVWloadidx1 { 42916 break 42917 } 42918 i1 := x1.AuxInt 42919 if x1.Aux != s { 42920 break 42921 } 42922 _ = x1.Args[2] 42923 if idx != x1.Args[0] { 42924 break 42925 } 42926 if p != x1.Args[1] { 42927 break 42928 } 42929 if mem != x1.Args[2] { 42930 break 42931 } 42932 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42933 break 42934 } 42935 b = mergePoint(b, x0, x1) 42936 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 42937 v.reset(OpCopy) 42938 v.AddArg(v0) 42939 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 42940 v1.AuxInt = i0 42941 v1.Aux = s 42942 v1.AddArg(p) 42943 v1.AddArg(idx) 42944 v1.AddArg(mem) 42945 v0.AddArg(v1) 42946 return true 42947 } 42948 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 42949 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 42950 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 42951 for { 42952 _ = v.Args[1] 42953 r1 := v.Args[0] 42954 if r1.Op != OpAMD64BSWAPL { 42955 break 42956 } 42957 x1 := r1.Args[0] 42958 if x1.Op != OpAMD64MOVLloadidx1 { 42959 break 42960 } 42961 i1 := x1.AuxInt 42962 s := x1.Aux 42963 _ = x1.Args[2] 42964 p := x1.Args[0] 42965 idx := x1.Args[1] 42966 mem := x1.Args[2] 42967 sh := v.Args[1] 42968 if sh.Op != OpAMD64SHLQconst { 42969 break 42970 } 42971 if sh.AuxInt != 32 { 42972 break 42973 } 42974 r0 
:= sh.Args[0] 42975 if r0.Op != OpAMD64BSWAPL { 42976 break 42977 } 42978 x0 := r0.Args[0] 42979 if x0.Op != OpAMD64MOVLloadidx1 { 42980 break 42981 } 42982 i0 := x0.AuxInt 42983 if x0.Aux != s { 42984 break 42985 } 42986 _ = x0.Args[2] 42987 if p != x0.Args[0] { 42988 break 42989 } 42990 if idx != x0.Args[1] { 42991 break 42992 } 42993 if mem != x0.Args[2] { 42994 break 42995 } 42996 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 42997 break 42998 } 42999 b = mergePoint(b, x0, x1) 43000 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 43001 v.reset(OpCopy) 43002 v.AddArg(v0) 43003 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 43004 v1.AuxInt = i0 43005 v1.Aux = s 43006 v1.AddArg(p) 43007 v1.AddArg(idx) 43008 v1.AddArg(mem) 43009 v0.AddArg(v1) 43010 return true 43011 } 43012 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 43013 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 43014 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 43015 for { 43016 _ = v.Args[1] 43017 r1 := v.Args[0] 43018 if r1.Op != OpAMD64BSWAPL { 43019 break 43020 } 43021 x1 := r1.Args[0] 43022 if x1.Op != OpAMD64MOVLloadidx1 { 43023 break 43024 } 43025 i1 := x1.AuxInt 43026 s := x1.Aux 43027 _ = x1.Args[2] 43028 idx := x1.Args[0] 43029 p := x1.Args[1] 43030 mem := x1.Args[2] 43031 sh := v.Args[1] 43032 if sh.Op != OpAMD64SHLQconst { 43033 break 43034 } 43035 if sh.AuxInt != 32 { 43036 break 43037 } 43038 r0 := sh.Args[0] 43039 if r0.Op != OpAMD64BSWAPL { 43040 break 43041 } 43042 x0 := r0.Args[0] 43043 if x0.Op != OpAMD64MOVLloadidx1 { 43044 break 43045 } 43046 i0 := x0.AuxInt 43047 if x0.Aux != s { 43048 break 43049 } 43050 _ = x0.Args[2] 43051 if p != x0.Args[0] { 43052 break 43053 } 43054 if idx != x0.Args[1] { 43055 break 43056 } 43057 if mem != x0.Args[2] { 43058 break 43059 } 43060 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 43061 break 43062 } 43063 b = mergePoint(b, x0, x1) 43064 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 43065 v.reset(OpCopy) 43066 v.AddArg(v0) 43067 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 43068 v1.AuxInt = i0 43069 v1.Aux = s 43070 v1.AddArg(p) 43071 v1.AddArg(idx) 43072 v1.AddArg(mem) 43073 v0.AddArg(v1) 43074 return true 43075 } 43076 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 43077 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 43078 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 43079 for { 43080 _ = v.Args[1] 43081 r1 := v.Args[0] 43082 if r1.Op != OpAMD64BSWAPL { 43083 break 43084 } 43085 x1 := r1.Args[0] 43086 if x1.Op != OpAMD64MOVLloadidx1 { 43087 break 43088 } 43089 i1 := x1.AuxInt 43090 s := x1.Aux 43091 _ = x1.Args[2] 43092 p := x1.Args[0] 43093 idx := x1.Args[1] 43094 mem := 
x1.Args[2] 43095 sh := v.Args[1] 43096 if sh.Op != OpAMD64SHLQconst { 43097 break 43098 } 43099 if sh.AuxInt != 32 { 43100 break 43101 } 43102 r0 := sh.Args[0] 43103 if r0.Op != OpAMD64BSWAPL { 43104 break 43105 } 43106 x0 := r0.Args[0] 43107 if x0.Op != OpAMD64MOVLloadidx1 { 43108 break 43109 } 43110 i0 := x0.AuxInt 43111 if x0.Aux != s { 43112 break 43113 } 43114 _ = x0.Args[2] 43115 if idx != x0.Args[0] { 43116 break 43117 } 43118 if p != x0.Args[1] { 43119 break 43120 } 43121 if mem != x0.Args[2] { 43122 break 43123 } 43124 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 43125 break 43126 } 43127 b = mergePoint(b, x0, x1) 43128 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 43129 v.reset(OpCopy) 43130 v.AddArg(v0) 43131 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 43132 v1.AuxInt = i0 43133 v1.Aux = s 43134 v1.AddArg(p) 43135 v1.AddArg(idx) 43136 v1.AddArg(mem) 43137 v0.AddArg(v1) 43138 return true 43139 } 43140 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 43141 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 43142 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 43143 for { 43144 _ = v.Args[1] 43145 r1 := v.Args[0] 43146 if r1.Op != OpAMD64BSWAPL { 43147 break 43148 } 43149 x1 := r1.Args[0] 43150 if x1.Op != OpAMD64MOVLloadidx1 { 43151 break 43152 } 43153 i1 := x1.AuxInt 43154 s := x1.Aux 43155 _ = x1.Args[2] 43156 idx := x1.Args[0] 43157 p := x1.Args[1] 43158 mem := x1.Args[2] 43159 sh := v.Args[1] 43160 if sh.Op != OpAMD64SHLQconst { 43161 break 43162 } 43163 if sh.AuxInt != 32 { 43164 break 43165 } 43166 r0 := sh.Args[0] 43167 if r0.Op != OpAMD64BSWAPL { 43168 break 43169 } 43170 x0 := r0.Args[0] 43171 if x0.Op != OpAMD64MOVLloadidx1 { 43172 break 43173 } 43174 i0 := x0.AuxInt 43175 if x0.Aux != s { 43176 break 43177 } 43178 _ = x0.Args[2] 43179 if idx != x0.Args[0] { 43180 break 43181 } 43182 if p != x0.Args[1] { 43183 break 43184 } 43185 if mem != x0.Args[2] { 43186 break 43187 } 43188 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 43189 break 43190 } 43191 b = mergePoint(b, x0, x1) 43192 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 43193 v.reset(OpCopy) 43194 v.AddArg(v0) 43195 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 43196 v1.AuxInt = i0 43197 v1.Aux = s 43198 v1.AddArg(p) 43199 v1.AddArg(idx) 43200 v1.AddArg(mem) 43201 v0.AddArg(v1) 43202 return true 43203 } 43204 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 43205 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 43206 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 43207 for { 43208 _ = v.Args[1] 43209 sh := v.Args[0] 43210 if sh.Op != OpAMD64SHLQconst { 43211 break 43212 } 43213 if sh.AuxInt != 32 { 43214 break 43215 } 43216 r0 := 
sh.Args[0] 43217 if r0.Op != OpAMD64BSWAPL { 43218 break 43219 } 43220 x0 := r0.Args[0] 43221 if x0.Op != OpAMD64MOVLloadidx1 { 43222 break 43223 } 43224 i0 := x0.AuxInt 43225 s := x0.Aux 43226 _ = x0.Args[2] 43227 p := x0.Args[0] 43228 idx := x0.Args[1] 43229 mem := x0.Args[2] 43230 r1 := v.Args[1] 43231 if r1.Op != OpAMD64BSWAPL { 43232 break 43233 } 43234 x1 := r1.Args[0] 43235 if x1.Op != OpAMD64MOVLloadidx1 { 43236 break 43237 } 43238 i1 := x1.AuxInt 43239 if x1.Aux != s { 43240 break 43241 } 43242 _ = x1.Args[2] 43243 if p != x1.Args[0] { 43244 break 43245 } 43246 if idx != x1.Args[1] { 43247 break 43248 } 43249 if mem != x1.Args[2] { 43250 break 43251 } 43252 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 43253 break 43254 } 43255 b = mergePoint(b, x0, x1) 43256 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 43257 v.reset(OpCopy) 43258 v.AddArg(v0) 43259 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 43260 v1.AuxInt = i0 43261 v1.Aux = s 43262 v1.AddArg(p) 43263 v1.AddArg(idx) 43264 v1.AddArg(mem) 43265 v0.AddArg(v1) 43266 return true 43267 } 43268 return false 43269 } 43270 func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool { 43271 b := v.Block 43272 _ = b 43273 typ := &b.Func.Config.Types 43274 _ = typ 43275 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 43276 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 43277 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 43278 for { 43279 _ = v.Args[1] 43280 sh := v.Args[0] 43281 if sh.Op != OpAMD64SHLQconst { 43282 break 43283 } 43284 if sh.AuxInt != 32 { 43285 break 43286 } 43287 r0 := sh.Args[0] 43288 if r0.Op != OpAMD64BSWAPL { 43289 break 43290 } 43291 x0 := r0.Args[0] 43292 if x0.Op != OpAMD64MOVLloadidx1 { 43293 break 43294 } 43295 i0 := x0.AuxInt 43296 s := x0.Aux 43297 _ = x0.Args[2] 43298 idx := x0.Args[0] 43299 p := x0.Args[1] 43300 mem := x0.Args[2] 43301 r1 := v.Args[1] 43302 if r1.Op != OpAMD64BSWAPL { 43303 break 43304 } 43305 x1 := r1.Args[0] 43306 if x1.Op != OpAMD64MOVLloadidx1 { 43307 break 43308 } 43309 i1 := x1.AuxInt 43310 if x1.Aux != s { 43311 break 43312 } 43313 _ = x1.Args[2] 43314 if p != x1.Args[0] { 43315 break 43316 } 43317 if idx != x1.Args[1] { 43318 break 43319 } 43320 if mem != x1.Args[2] { 43321 break 43322 } 43323 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 43324 break 43325 } 43326 b = mergePoint(b, x0, x1) 43327 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 43328 v.reset(OpCopy) 43329 v.AddArg(v0) 43330 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 43331 v1.AuxInt = i0 43332 v1.Aux = s 43333 v1.AddArg(p) 43334 v1.AddArg(idx) 43335 v1.AddArg(mem) 43336 v0.AddArg(v1) 43337 return true 43338 } 43339 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) 43340 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && 
clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 43341 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 43342 for { 43343 _ = v.Args[1] 43344 sh := v.Args[0] 43345 if sh.Op != OpAMD64SHLQconst { 43346 break 43347 } 43348 if sh.AuxInt != 32 { 43349 break 43350 } 43351 r0 := sh.Args[0] 43352 if r0.Op != OpAMD64BSWAPL { 43353 break 43354 } 43355 x0 := r0.Args[0] 43356 if x0.Op != OpAMD64MOVLloadidx1 { 43357 break 43358 } 43359 i0 := x0.AuxInt 43360 s := x0.Aux 43361 _ = x0.Args[2] 43362 p := x0.Args[0] 43363 idx := x0.Args[1] 43364 mem := x0.Args[2] 43365 r1 := v.Args[1] 43366 if r1.Op != OpAMD64BSWAPL { 43367 break 43368 } 43369 x1 := r1.Args[0] 43370 if x1.Op != OpAMD64MOVLloadidx1 { 43371 break 43372 } 43373 i1 := x1.AuxInt 43374 if x1.Aux != s { 43375 break 43376 } 43377 _ = x1.Args[2] 43378 if idx != x1.Args[0] { 43379 break 43380 } 43381 if p != x1.Args[1] { 43382 break 43383 } 43384 if mem != x1.Args[2] { 43385 break 43386 } 43387 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 43388 break 43389 } 43390 b = mergePoint(b, x0, x1) 43391 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 43392 v.reset(OpCopy) 43393 v.AddArg(v0) 43394 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 43395 v1.AuxInt = i0 43396 v1.Aux = s 43397 v1.AddArg(p) 43398 v1.AddArg(idx) 43399 v1.AddArg(mem) 43400 v0.AddArg(v1) 43401 return true 43402 } 43403 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) 43404 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 43405 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 43406 for { 43407 _ = v.Args[1] 43408 sh := v.Args[0] 43409 if sh.Op != OpAMD64SHLQconst { 43410 break 43411 } 43412 if sh.AuxInt != 32 { 43413 break 43414 } 43415 r0 := sh.Args[0] 43416 if r0.Op != OpAMD64BSWAPL { 43417 break 43418 } 43419 x0 := r0.Args[0] 43420 if x0.Op != OpAMD64MOVLloadidx1 { 43421 break 43422 } 43423 i0 := x0.AuxInt 43424 s := x0.Aux 43425 _ = x0.Args[2] 43426 idx := x0.Args[0] 43427 p := x0.Args[1] 43428 mem := x0.Args[2] 43429 r1 := v.Args[1] 43430 if r1.Op != OpAMD64BSWAPL { 43431 break 43432 } 43433 x1 := r1.Args[0] 43434 if x1.Op != OpAMD64MOVLloadidx1 { 43435 break 43436 } 43437 i1 := x1.AuxInt 43438 if x1.Aux != s { 43439 break 43440 } 43441 _ = x1.Args[2] 43442 if idx != x1.Args[0] { 43443 break 43444 } 43445 if p != x1.Args[1] { 43446 break 43447 } 43448 if mem != x1.Args[2] { 43449 break 43450 } 43451 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 43452 break 43453 } 43454 b = mergePoint(b, x0, x1) 43455 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 43456 v.reset(OpCopy) 43457 v.AddArg(v0) 43458 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 43459 v1.AuxInt = i0 43460 v1.Aux = s 43461 v1.AddArg(p) 43462 v1.AddArg(idx) 43463 v1.AddArg(mem) 43464 v0.AddArg(v1) 43465 return true 43466 } 43467 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 43468 
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 43469 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 43470 for { 43471 _ = v.Args[1] 43472 s0 := v.Args[0] 43473 if s0.Op != OpAMD64SHLQconst { 43474 break 43475 } 43476 j0 := s0.AuxInt 43477 x0 := s0.Args[0] 43478 if x0.Op != OpAMD64MOVBloadidx1 { 43479 break 43480 } 43481 i0 := x0.AuxInt 43482 s := x0.Aux 43483 _ = x0.Args[2] 43484 p := x0.Args[0] 43485 idx := x0.Args[1] 43486 mem := x0.Args[2] 43487 or := v.Args[1] 43488 if or.Op != OpAMD64ORQ { 43489 break 43490 } 43491 _ = or.Args[1] 43492 s1 := or.Args[0] 43493 if s1.Op != OpAMD64SHLQconst { 43494 break 43495 } 43496 j1 := s1.AuxInt 43497 x1 := s1.Args[0] 43498 if x1.Op != OpAMD64MOVBloadidx1 { 43499 break 43500 } 43501 i1 := x1.AuxInt 43502 if x1.Aux != s { 43503 break 43504 } 43505 _ = x1.Args[2] 43506 if p != x1.Args[0] { 43507 break 43508 } 43509 if idx != x1.Args[1] { 43510 break 43511 } 43512 if mem != x1.Args[2] { 43513 break 43514 } 43515 y := or.Args[1] 43516 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 43517 break 43518 } 43519 b = mergePoint(b, x0, x1) 43520 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 43521 v.reset(OpCopy) 43522 v.AddArg(v0) 43523 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 43524 v1.AuxInt = j1 43525 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 43526 v2.AuxInt = 8 43527 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 43528 v3.AuxInt = i0 43529 v3.Aux = s 43530 v3.AddArg(p) 43531 v3.AddArg(idx) 43532 v3.AddArg(mem) 43533 v2.AddArg(v3) 43534 v1.AddArg(v2) 43535 v0.AddArg(v1) 43536 v0.AddArg(y) 43537 return true 43538 } 43539 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 43540 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 43541 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 43542 for { 43543 _ = v.Args[1] 43544 s0 := v.Args[0] 43545 if s0.Op != OpAMD64SHLQconst { 43546 break 43547 } 43548 j0 := s0.AuxInt 43549 x0 := s0.Args[0] 43550 if x0.Op != OpAMD64MOVBloadidx1 { 43551 break 43552 } 43553 i0 := x0.AuxInt 43554 s := x0.Aux 43555 _ = x0.Args[2] 43556 idx := x0.Args[0] 43557 p := x0.Args[1] 43558 mem := x0.Args[2] 43559 or := v.Args[1] 43560 if or.Op != OpAMD64ORQ { 43561 break 43562 } 43563 _ = or.Args[1] 43564 s1 := or.Args[0] 43565 if s1.Op != OpAMD64SHLQconst { 43566 break 43567 } 43568 j1 := s1.AuxInt 43569 x1 := s1.Args[0] 43570 if x1.Op != OpAMD64MOVBloadidx1 { 43571 break 43572 } 43573 i1 := x1.AuxInt 43574 if x1.Aux != s { 43575 break 43576 } 43577 _ = x1.Args[2] 43578 if p != x1.Args[0] { 43579 break 43580 } 43581 if idx != x1.Args[1] { 43582 break 43583 } 43584 if mem != x1.Args[2] { 43585 break 43586 } 43587 y := or.Args[1] 43588 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 43589 break 43590 } 43591 b = mergePoint(b, x0, x1) 43592 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 43593 v.reset(OpCopy) 43594 v.AddArg(v0) 43595 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 43596 v1.AuxInt = j1 43597 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 43598 v2.AuxInt = 8 43599 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 43600 v3.AuxInt = i0 43601 v3.Aux = s 43602 v3.AddArg(p) 43603 v3.AddArg(idx) 43604 v3.AddArg(mem) 43605 v2.AddArg(v3) 43606 v1.AddArg(v2) 43607 v0.AddArg(v1) 43608 v0.AddArg(y) 43609 return true 43610 } 43611 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 43612 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 43613 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 43614 for { 43615 _ = v.Args[1] 43616 s0 := v.Args[0] 43617 if s0.Op != OpAMD64SHLQconst { 43618 break 43619 } 43620 j0 := s0.AuxInt 43621 x0 := s0.Args[0] 43622 if x0.Op != OpAMD64MOVBloadidx1 { 43623 break 43624 } 43625 i0 := x0.AuxInt 43626 s := x0.Aux 43627 _ = x0.Args[2] 43628 p := x0.Args[0] 43629 idx := x0.Args[1] 43630 mem := x0.Args[2] 43631 or := v.Args[1] 43632 if or.Op != OpAMD64ORQ { 43633 break 43634 } 43635 _ = or.Args[1] 43636 s1 := or.Args[0] 43637 if s1.Op != OpAMD64SHLQconst { 43638 break 43639 } 43640 j1 := s1.AuxInt 43641 x1 := s1.Args[0] 43642 if x1.Op != OpAMD64MOVBloadidx1 { 43643 break 43644 } 43645 i1 := x1.AuxInt 43646 if x1.Aux != s { 43647 break 43648 } 43649 _ = x1.Args[2] 43650 if idx != x1.Args[0] { 43651 break 43652 } 43653 if p != x1.Args[1] { 43654 break 43655 } 43656 if mem != x1.Args[2] { 43657 break 43658 } 43659 y := or.Args[1] 43660 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 43661 break 43662 } 43663 b = mergePoint(b, x0, x1) 43664 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 43665 v.reset(OpCopy) 43666 v.AddArg(v0) 43667 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 43668 v1.AuxInt = j1 43669 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 43670 v2.AuxInt = 8 43671 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 43672 v3.AuxInt = i0 43673 v3.Aux = s 43674 v3.AddArg(p) 43675 v3.AddArg(idx) 43676 v3.AddArg(mem) 43677 v2.AddArg(v3) 43678 v1.AddArg(v2) 43679 v0.AddArg(v1) 43680 v0.AddArg(y) 43681 return true 43682 } 43683 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 43684 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 43685 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 43686 for { 43687 _ = v.Args[1] 43688 s0 := v.Args[0] 
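// Editor's note (not part of the generated output): every rule in this family
// follows the match/cond/result triple written above it. The matcher walks the
// operand tree by hand; the cond line re-checks what tree shape alone cannot
// express (adjacent offsets, single-use values, a valid merge point); the
// result comment describes the replacement built at the end. A minimal sketch
// of the source-level idiom these ORQ rules appear to target (names here are
// illustrative only) is a byte-at-a-time big-endian load:
//
//	v := uint64(b[0])<<56 | uint64(b[1])<<48 | uint64(b[2])<<40 | uint64(b[3])<<32 |
//		uint64(b[4])<<24 | uint64(b[5])<<16 | uint64(b[6])<<8 | uint64(b[7])
//
// Two adjacent byte loads whose shifts differ by 8 (i1 == i0+1, j1 == j0-8)
// are fused into one 16-bit load plus ROLWconst [8], which byte-swaps the
// halfword; repeated application eventually collapses the whole tree into a
// single BSWAPQ of one 8-byte load.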
43689 if s0.Op != OpAMD64SHLQconst { 43690 break 43691 } 43692 j0 := s0.AuxInt 43693 x0 := s0.Args[0] 43694 if x0.Op != OpAMD64MOVBloadidx1 { 43695 break 43696 } 43697 i0 := x0.AuxInt 43698 s := x0.Aux 43699 _ = x0.Args[2] 43700 idx := x0.Args[0] 43701 p := x0.Args[1] 43702 mem := x0.Args[2] 43703 or := v.Args[1] 43704 if or.Op != OpAMD64ORQ { 43705 break 43706 } 43707 _ = or.Args[1] 43708 s1 := or.Args[0] 43709 if s1.Op != OpAMD64SHLQconst { 43710 break 43711 } 43712 j1 := s1.AuxInt 43713 x1 := s1.Args[0] 43714 if x1.Op != OpAMD64MOVBloadidx1 { 43715 break 43716 } 43717 i1 := x1.AuxInt 43718 if x1.Aux != s { 43719 break 43720 } 43721 _ = x1.Args[2] 43722 if idx != x1.Args[0] { 43723 break 43724 } 43725 if p != x1.Args[1] { 43726 break 43727 } 43728 if mem != x1.Args[2] { 43729 break 43730 } 43731 y := or.Args[1] 43732 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 43733 break 43734 } 43735 b = mergePoint(b, x0, x1) 43736 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 43737 v.reset(OpCopy) 43738 v.AddArg(v0) 43739 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 43740 v1.AuxInt = j1 43741 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 43742 v2.AuxInt = 8 43743 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 43744 v3.AuxInt = i0 43745 v3.Aux = s 43746 v3.AddArg(p) 43747 v3.AddArg(idx) 43748 v3.AddArg(mem) 43749 v2.AddArg(v3) 43750 v1.AddArg(v2) 43751 v0.AddArg(v1) 43752 v0.AddArg(y) 43753 return true 43754 } 43755 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 43756 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 43757 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 43758 for { 43759 _ = v.Args[1] 43760 s0 := v.Args[0] 43761 if s0.Op != OpAMD64SHLQconst { 43762 break 43763 } 43764 j0 := s0.AuxInt 43765 x0 := s0.Args[0] 43766 if x0.Op != OpAMD64MOVBloadidx1 { 43767 break 43768 } 43769 i0 := x0.AuxInt 43770 s := x0.Aux 43771 _ = x0.Args[2] 43772 p := x0.Args[0] 43773 idx := x0.Args[1] 43774 mem := x0.Args[2] 43775 or := v.Args[1] 43776 if or.Op != OpAMD64ORQ { 43777 break 43778 } 43779 _ = or.Args[1] 43780 y := or.Args[0] 43781 s1 := or.Args[1] 43782 if s1.Op != OpAMD64SHLQconst { 43783 break 43784 } 43785 j1 := s1.AuxInt 43786 x1 := s1.Args[0] 43787 if x1.Op != OpAMD64MOVBloadidx1 { 43788 break 43789 } 43790 i1 := x1.AuxInt 43791 if x1.Aux != s { 43792 break 43793 } 43794 _ = x1.Args[2] 43795 if p != x1.Args[0] { 43796 break 43797 } 43798 if idx != x1.Args[1] { 43799 break 43800 } 43801 if mem != x1.Args[2] { 43802 break 43803 } 43804 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 43805 break 43806 } 43807 b = mergePoint(b, x0, x1) 43808 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 43809 v.reset(OpCopy) 43810 v.AddArg(v0) 43811 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 43812 v1.AuxInt = j1 43813 v2 := b.NewValue0(v.Pos, 
OpAMD64ROLWconst, typ.UInt16) 43814 v2.AuxInt = 8 43815 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 43816 v3.AuxInt = i0 43817 v3.Aux = s 43818 v3.AddArg(p) 43819 v3.AddArg(idx) 43820 v3.AddArg(mem) 43821 v2.AddArg(v3) 43822 v1.AddArg(v2) 43823 v0.AddArg(v1) 43824 v0.AddArg(y) 43825 return true 43826 } 43827 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 43828 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 43829 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 43830 for { 43831 _ = v.Args[1] 43832 s0 := v.Args[0] 43833 if s0.Op != OpAMD64SHLQconst { 43834 break 43835 } 43836 j0 := s0.AuxInt 43837 x0 := s0.Args[0] 43838 if x0.Op != OpAMD64MOVBloadidx1 { 43839 break 43840 } 43841 i0 := x0.AuxInt 43842 s := x0.Aux 43843 _ = x0.Args[2] 43844 idx := x0.Args[0] 43845 p := x0.Args[1] 43846 mem := x0.Args[2] 43847 or := v.Args[1] 43848 if or.Op != OpAMD64ORQ { 43849 break 43850 } 43851 _ = or.Args[1] 43852 y := or.Args[0] 43853 s1 := or.Args[1] 43854 if s1.Op != OpAMD64SHLQconst { 43855 break 43856 } 43857 j1 := s1.AuxInt 43858 x1 := s1.Args[0] 43859 if x1.Op != OpAMD64MOVBloadidx1 { 43860 break 43861 } 43862 i1 := x1.AuxInt 43863 if x1.Aux != s { 43864 break 43865 } 43866 _ = x1.Args[2] 43867 if p != x1.Args[0] { 43868 break 43869 } 43870 if idx != x1.Args[1] { 43871 break 43872 } 43873 if mem != x1.Args[2] { 43874 break 43875 } 43876 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 43877 break 43878 } 43879 b = mergePoint(b, x0, x1) 43880 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 43881 v.reset(OpCopy) 43882 v.AddArg(v0) 43883 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 43884 v1.AuxInt = j1 43885 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 43886 v2.AuxInt = 8 43887 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 43888 v3.AuxInt = i0 43889 v3.Aux = s 43890 v3.AddArg(p) 43891 v3.AddArg(idx) 43892 v3.AddArg(mem) 43893 v2.AddArg(v3) 43894 v1.AddArg(v2) 43895 v0.AddArg(v1) 43896 v0.AddArg(y) 43897 return true 43898 } 43899 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 43900 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 43901 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 43902 for { 43903 _ = v.Args[1] 43904 s0 := v.Args[0] 43905 if s0.Op != OpAMD64SHLQconst { 43906 break 43907 } 43908 j0 := s0.AuxInt 43909 x0 := s0.Args[0] 43910 if x0.Op != OpAMD64MOVBloadidx1 { 43911 break 43912 } 43913 i0 := x0.AuxInt 43914 s := x0.Aux 43915 _ = x0.Args[2] 43916 p := x0.Args[0] 43917 idx := x0.Args[1] 43918 mem := x0.Args[2] 43919 or := v.Args[1] 43920 if or.Op != OpAMD64ORQ { 43921 break 43922 } 43923 _ = or.Args[1] 43924 y := or.Args[0] 43925 s1 := 
or.Args[1] 43926 if s1.Op != OpAMD64SHLQconst { 43927 break 43928 } 43929 j1 := s1.AuxInt 43930 x1 := s1.Args[0] 43931 if x1.Op != OpAMD64MOVBloadidx1 { 43932 break 43933 } 43934 i1 := x1.AuxInt 43935 if x1.Aux != s { 43936 break 43937 } 43938 _ = x1.Args[2] 43939 if idx != x1.Args[0] { 43940 break 43941 } 43942 if p != x1.Args[1] { 43943 break 43944 } 43945 if mem != x1.Args[2] { 43946 break 43947 } 43948 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 43949 break 43950 } 43951 b = mergePoint(b, x0, x1) 43952 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 43953 v.reset(OpCopy) 43954 v.AddArg(v0) 43955 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 43956 v1.AuxInt = j1 43957 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 43958 v2.AuxInt = 8 43959 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 43960 v3.AuxInt = i0 43961 v3.Aux = s 43962 v3.AddArg(p) 43963 v3.AddArg(idx) 43964 v3.AddArg(mem) 43965 v2.AddArg(v3) 43966 v1.AddArg(v2) 43967 v0.AddArg(v1) 43968 v0.AddArg(y) 43969 return true 43970 } 43971 return false 43972 } 43973 func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool { 43974 b := v.Block 43975 _ = b 43976 typ := &b.Func.Config.Types 43977 _ = typ 43978 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 43979 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 43980 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 43981 for { 43982 _ = v.Args[1] 43983 s0 := v.Args[0] 43984 if s0.Op != OpAMD64SHLQconst { 43985 break 43986 } 43987 j0 := s0.AuxInt 43988 x0 := s0.Args[0] 43989 if x0.Op != OpAMD64MOVBloadidx1 { 43990 break 43991 } 43992 i0 := x0.AuxInt 43993 s := x0.Aux 43994 _ = x0.Args[2] 43995 idx := x0.Args[0] 43996 p := x0.Args[1] 43997 mem := x0.Args[2] 43998 or := v.Args[1] 43999 if or.Op != OpAMD64ORQ { 44000 break 44001 } 44002 _ = or.Args[1] 44003 y := or.Args[0] 44004 s1 := or.Args[1] 44005 if s1.Op != OpAMD64SHLQconst { 44006 break 44007 } 44008 j1 := s1.AuxInt 44009 x1 := s1.Args[0] 44010 if x1.Op != OpAMD64MOVBloadidx1 { 44011 break 44012 } 44013 i1 := x1.AuxInt 44014 if x1.Aux != s { 44015 break 44016 } 44017 _ = x1.Args[2] 44018 if idx != x1.Args[0] { 44019 break 44020 } 44021 if p != x1.Args[1] { 44022 break 44023 } 44024 if mem != x1.Args[2] { 44025 break 44026 } 44027 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44028 break 44029 } 44030 b = mergePoint(b, x0, x1) 44031 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44032 v.reset(OpCopy) 44033 v.AddArg(v0) 44034 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44035 v1.AuxInt = j1 44036 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44037 v2.AuxInt = 8 44038 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44039 v3.AuxInt = i0 44040 v3.Aux = s 44041 v3.AddArg(p) 44042 v3.AddArg(idx) 44043 v3.AddArg(mem) 44044 v2.AddArg(v3) 44045 v1.AddArg(v2) 44046 
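// Editor's note (not generated): the replacement tree is assembled leaf-first:
// v3 (the widened MOVWloadidx1) feeds v2 (ROLWconst [8]), which feeds v1
// (SHLQconst), which feeds v0 (the outer ORQ) together with the untouched
// operand y. v1 keeps the smaller shift j1 because, once the two bytes are
// merged, the combined halfword occupies bit positions j1 through j1+15.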
v0.AddArg(v1) 44047 v0.AddArg(y) 44048 return true 44049 } 44050 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 44051 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 44052 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 44053 for { 44054 _ = v.Args[1] 44055 or := v.Args[0] 44056 if or.Op != OpAMD64ORQ { 44057 break 44058 } 44059 _ = or.Args[1] 44060 s1 := or.Args[0] 44061 if s1.Op != OpAMD64SHLQconst { 44062 break 44063 } 44064 j1 := s1.AuxInt 44065 x1 := s1.Args[0] 44066 if x1.Op != OpAMD64MOVBloadidx1 { 44067 break 44068 } 44069 i1 := x1.AuxInt 44070 s := x1.Aux 44071 _ = x1.Args[2] 44072 p := x1.Args[0] 44073 idx := x1.Args[1] 44074 mem := x1.Args[2] 44075 y := or.Args[1] 44076 s0 := v.Args[1] 44077 if s0.Op != OpAMD64SHLQconst { 44078 break 44079 } 44080 j0 := s0.AuxInt 44081 x0 := s0.Args[0] 44082 if x0.Op != OpAMD64MOVBloadidx1 { 44083 break 44084 } 44085 i0 := x0.AuxInt 44086 if x0.Aux != s { 44087 break 44088 } 44089 _ = x0.Args[2] 44090 if p != x0.Args[0] { 44091 break 44092 } 44093 if idx != x0.Args[1] { 44094 break 44095 } 44096 if mem != x0.Args[2] { 44097 break 44098 } 44099 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44100 break 44101 } 44102 b = mergePoint(b, x0, x1) 44103 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44104 v.reset(OpCopy) 44105 v.AddArg(v0) 44106 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44107 v1.AuxInt = j1 44108 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44109 v2.AuxInt = 8 44110 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44111 v3.AuxInt = i0 44112 v3.Aux = s 44113 v3.AddArg(p) 44114 v3.AddArg(idx) 44115 v3.AddArg(mem) 44116 v2.AddArg(v3) 44117 v1.AddArg(v2) 44118 v0.AddArg(v1) 44119 v0.AddArg(y) 44120 return true 44121 } 44122 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 44123 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 44124 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 44125 for { 44126 _ = v.Args[1] 44127 or := v.Args[0] 44128 if or.Op != OpAMD64ORQ { 44129 break 44130 } 44131 _ = or.Args[1] 44132 s1 := or.Args[0] 44133 if s1.Op != OpAMD64SHLQconst { 44134 break 44135 } 44136 j1 := s1.AuxInt 44137 x1 := s1.Args[0] 44138 if x1.Op != OpAMD64MOVBloadidx1 { 44139 break 44140 } 44141 i1 := x1.AuxInt 44142 s := x1.Aux 44143 _ = x1.Args[2] 44144 idx := x1.Args[0] 44145 p := x1.Args[1] 44146 mem := x1.Args[2] 44147 y := or.Args[1] 44148 s0 := v.Args[1] 44149 if s0.Op != OpAMD64SHLQconst { 44150 break 44151 } 44152 j0 := s0.AuxInt 44153 x0 := s0.Args[0] 44154 if x0.Op != OpAMD64MOVBloadidx1 { 44155 break 44156 } 44157 i0 := x0.AuxInt 44158 if x0.Aux != s { 44159 break 44160 } 44161 _ = x0.Args[2] 44162 
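// Editor's note (not generated): the otherwise dead "_ = x0.Args[2]" read
// above is a bounds-check hint: touching the highest-numbered argument first
// lets the compiler prove the following Args[0] and Args[1] accesses are in
// range. The equality tests against p, idx, and mem then ensure both byte
// loads use the same base pointer, index, and memory state, so only the
// constant offsets i0 and i1 may differ.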
if p != x0.Args[0] { 44163 break 44164 } 44165 if idx != x0.Args[1] { 44166 break 44167 } 44168 if mem != x0.Args[2] { 44169 break 44170 } 44171 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44172 break 44173 } 44174 b = mergePoint(b, x0, x1) 44175 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44176 v.reset(OpCopy) 44177 v.AddArg(v0) 44178 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44179 v1.AuxInt = j1 44180 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44181 v2.AuxInt = 8 44182 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44183 v3.AuxInt = i0 44184 v3.Aux = s 44185 v3.AddArg(p) 44186 v3.AddArg(idx) 44187 v3.AddArg(mem) 44188 v2.AddArg(v3) 44189 v1.AddArg(v2) 44190 v0.AddArg(v1) 44191 v0.AddArg(y) 44192 return true 44193 } 44194 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 44195 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 44196 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 44197 for { 44198 _ = v.Args[1] 44199 or := v.Args[0] 44200 if or.Op != OpAMD64ORQ { 44201 break 44202 } 44203 _ = or.Args[1] 44204 y := or.Args[0] 44205 s1 := or.Args[1] 44206 if s1.Op != OpAMD64SHLQconst { 44207 break 44208 } 44209 j1 := s1.AuxInt 44210 x1 := s1.Args[0] 44211 if x1.Op != OpAMD64MOVBloadidx1 { 44212 break 44213 } 44214 i1 := x1.AuxInt 44215 s := x1.Aux 44216 _ = x1.Args[2] 44217 p := x1.Args[0] 44218 idx := x1.Args[1] 44219 mem := x1.Args[2] 44220 s0 := v.Args[1] 44221 if s0.Op != OpAMD64SHLQconst { 44222 break 44223 } 44224 j0 := s0.AuxInt 44225 x0 := s0.Args[0] 44226 if x0.Op != OpAMD64MOVBloadidx1 { 44227 break 44228 } 44229 i0 := x0.AuxInt 44230 if x0.Aux != s { 44231 break 44232 } 44233 _ = x0.Args[2] 44234 if p != x0.Args[0] { 44235 break 44236 } 44237 if idx != x0.Args[1] { 44238 break 44239 } 44240 if mem != x0.Args[2] { 44241 break 44242 } 44243 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44244 break 44245 } 44246 b = mergePoint(b, x0, x1) 44247 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44248 v.reset(OpCopy) 44249 v.AddArg(v0) 44250 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44251 v1.AuxInt = j1 44252 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44253 v2.AuxInt = 8 44254 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44255 v3.AuxInt = i0 44256 v3.Aux = s 44257 v3.AddArg(p) 44258 v3.AddArg(idx) 44259 v3.AddArg(mem) 44260 v2.AddArg(v3) 44261 v1.AddArg(v2) 44262 v0.AddArg(v1) 44263 v0.AddArg(y) 44264 return true 44265 } 44266 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 44267 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && 
clobber(or) 44268 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 44269 for { 44270 _ = v.Args[1] 44271 or := v.Args[0] 44272 if or.Op != OpAMD64ORQ { 44273 break 44274 } 44275 _ = or.Args[1] 44276 y := or.Args[0] 44277 s1 := or.Args[1] 44278 if s1.Op != OpAMD64SHLQconst { 44279 break 44280 } 44281 j1 := s1.AuxInt 44282 x1 := s1.Args[0] 44283 if x1.Op != OpAMD64MOVBloadidx1 { 44284 break 44285 } 44286 i1 := x1.AuxInt 44287 s := x1.Aux 44288 _ = x1.Args[2] 44289 idx := x1.Args[0] 44290 p := x1.Args[1] 44291 mem := x1.Args[2] 44292 s0 := v.Args[1] 44293 if s0.Op != OpAMD64SHLQconst { 44294 break 44295 } 44296 j0 := s0.AuxInt 44297 x0 := s0.Args[0] 44298 if x0.Op != OpAMD64MOVBloadidx1 { 44299 break 44300 } 44301 i0 := x0.AuxInt 44302 if x0.Aux != s { 44303 break 44304 } 44305 _ = x0.Args[2] 44306 if p != x0.Args[0] { 44307 break 44308 } 44309 if idx != x0.Args[1] { 44310 break 44311 } 44312 if mem != x0.Args[2] { 44313 break 44314 } 44315 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44316 break 44317 } 44318 b = mergePoint(b, x0, x1) 44319 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44320 v.reset(OpCopy) 44321 v.AddArg(v0) 44322 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44323 v1.AuxInt = j1 44324 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44325 v2.AuxInt = 8 44326 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44327 v3.AuxInt = i0 44328 v3.Aux = s 44329 v3.AddArg(p) 44330 v3.AddArg(idx) 44331 v3.AddArg(mem) 44332 v2.AddArg(v3) 44333 v1.AddArg(v2) 44334 v0.AddArg(v1) 44335 v0.AddArg(y) 44336 return true 44337 } 44338 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 44339 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 44340 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 44341 for { 44342 _ = v.Args[1] 44343 or := v.Args[0] 44344 if or.Op != OpAMD64ORQ { 44345 break 44346 } 44347 _ = or.Args[1] 44348 s1 := or.Args[0] 44349 if s1.Op != OpAMD64SHLQconst { 44350 break 44351 } 44352 j1 := s1.AuxInt 44353 x1 := s1.Args[0] 44354 if x1.Op != OpAMD64MOVBloadidx1 { 44355 break 44356 } 44357 i1 := x1.AuxInt 44358 s := x1.Aux 44359 _ = x1.Args[2] 44360 p := x1.Args[0] 44361 idx := x1.Args[1] 44362 mem := x1.Args[2] 44363 y := or.Args[1] 44364 s0 := v.Args[1] 44365 if s0.Op != OpAMD64SHLQconst { 44366 break 44367 } 44368 j0 := s0.AuxInt 44369 x0 := s0.Args[0] 44370 if x0.Op != OpAMD64MOVBloadidx1 { 44371 break 44372 } 44373 i0 := x0.AuxInt 44374 if x0.Aux != s { 44375 break 44376 } 44377 _ = x0.Args[2] 44378 if idx != x0.Args[0] { 44379 break 44380 } 44381 if p != x0.Args[1] { 44382 break 44383 } 44384 if mem != x0.Args[2] { 44385 break 44386 } 44387 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44388 break 44389 } 44390 b = mergePoint(b, x0, x1) 44391 
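// Editor's note (not generated): b has just been rebound to
// mergePoint(b, x0, x1), a block in which both original loads are known to be
// available; the cond line already rejected the rewrite when no such block
// exists (mergePoint == nil). Everything built with b.NewValue0 below
// therefore lives at that merge point rather than in v's original block,
// which is why v itself is reduced to an OpCopy of the new tree instead of
// being rewritten in place.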
v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44392 v.reset(OpCopy) 44393 v.AddArg(v0) 44394 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44395 v1.AuxInt = j1 44396 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44397 v2.AuxInt = 8 44398 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44399 v3.AuxInt = i0 44400 v3.Aux = s 44401 v3.AddArg(p) 44402 v3.AddArg(idx) 44403 v3.AddArg(mem) 44404 v2.AddArg(v3) 44405 v1.AddArg(v2) 44406 v0.AddArg(v1) 44407 v0.AddArg(y) 44408 return true 44409 } 44410 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 44411 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 44412 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 44413 for { 44414 _ = v.Args[1] 44415 or := v.Args[0] 44416 if or.Op != OpAMD64ORQ { 44417 break 44418 } 44419 _ = or.Args[1] 44420 s1 := or.Args[0] 44421 if s1.Op != OpAMD64SHLQconst { 44422 break 44423 } 44424 j1 := s1.AuxInt 44425 x1 := s1.Args[0] 44426 if x1.Op != OpAMD64MOVBloadidx1 { 44427 break 44428 } 44429 i1 := x1.AuxInt 44430 s := x1.Aux 44431 _ = x1.Args[2] 44432 idx := x1.Args[0] 44433 p := x1.Args[1] 44434 mem := x1.Args[2] 44435 y := or.Args[1] 44436 s0 := v.Args[1] 44437 if s0.Op != OpAMD64SHLQconst { 44438 break 44439 } 44440 j0 := s0.AuxInt 44441 x0 := s0.Args[0] 44442 if x0.Op != OpAMD64MOVBloadidx1 { 44443 break 44444 } 44445 i0 := x0.AuxInt 44446 if x0.Aux != s { 44447 break 44448 } 44449 _ = x0.Args[2] 44450 if idx != x0.Args[0] { 44451 break 44452 } 44453 if p != x0.Args[1] { 44454 break 44455 } 44456 if mem != x0.Args[2] { 44457 break 44458 } 44459 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44460 break 44461 } 44462 b = mergePoint(b, x0, x1) 44463 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44464 v.reset(OpCopy) 44465 v.AddArg(v0) 44466 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44467 v1.AuxInt = j1 44468 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44469 v2.AuxInt = 8 44470 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44471 v3.AuxInt = i0 44472 v3.Aux = s 44473 v3.AddArg(p) 44474 v3.AddArg(idx) 44475 v3.AddArg(mem) 44476 v2.AddArg(v3) 44477 v1.AddArg(v2) 44478 v0.AddArg(v1) 44479 v0.AddArg(y) 44480 return true 44481 } 44482 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 44483 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 44484 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 44485 for { 44486 _ = v.Args[1] 44487 or := v.Args[0] 44488 if or.Op != OpAMD64ORQ { 44489 break 44490 } 44491 _ = or.Args[1] 44492 y := or.Args[0] 44493 s1 := or.Args[1] 44494 if s1.Op != OpAMD64SHLQconst { 44495 break 44496 } 44497 j1 := s1.AuxInt 44498 x1 := s1.Args[0] 44499 if x1.Op 
!= OpAMD64MOVBloadidx1 { 44500 break 44501 } 44502 i1 := x1.AuxInt 44503 s := x1.Aux 44504 _ = x1.Args[2] 44505 p := x1.Args[0] 44506 idx := x1.Args[1] 44507 mem := x1.Args[2] 44508 s0 := v.Args[1] 44509 if s0.Op != OpAMD64SHLQconst { 44510 break 44511 } 44512 j0 := s0.AuxInt 44513 x0 := s0.Args[0] 44514 if x0.Op != OpAMD64MOVBloadidx1 { 44515 break 44516 } 44517 i0 := x0.AuxInt 44518 if x0.Aux != s { 44519 break 44520 } 44521 _ = x0.Args[2] 44522 if idx != x0.Args[0] { 44523 break 44524 } 44525 if p != x0.Args[1] { 44526 break 44527 } 44528 if mem != x0.Args[2] { 44529 break 44530 } 44531 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44532 break 44533 } 44534 b = mergePoint(b, x0, x1) 44535 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44536 v.reset(OpCopy) 44537 v.AddArg(v0) 44538 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44539 v1.AuxInt = j1 44540 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44541 v2.AuxInt = 8 44542 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44543 v3.AuxInt = i0 44544 v3.Aux = s 44545 v3.AddArg(p) 44546 v3.AddArg(idx) 44547 v3.AddArg(mem) 44548 v2.AddArg(v3) 44549 v1.AddArg(v2) 44550 v0.AddArg(v1) 44551 v0.AddArg(y) 44552 return true 44553 } 44554 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 44555 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 44556 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 44557 for { 44558 _ = v.Args[1] 44559 or := v.Args[0] 44560 if or.Op != OpAMD64ORQ { 44561 break 44562 } 44563 _ = or.Args[1] 44564 y := or.Args[0] 44565 s1 := or.Args[1] 44566 if s1.Op != OpAMD64SHLQconst { 44567 break 44568 } 44569 j1 := s1.AuxInt 44570 x1 := s1.Args[0] 44571 if x1.Op != OpAMD64MOVBloadidx1 { 44572 break 44573 } 44574 i1 := x1.AuxInt 44575 s := x1.Aux 44576 _ = x1.Args[2] 44577 idx := x1.Args[0] 44578 p := x1.Args[1] 44579 mem := x1.Args[2] 44580 s0 := v.Args[1] 44581 if s0.Op != OpAMD64SHLQconst { 44582 break 44583 } 44584 j0 := s0.AuxInt 44585 x0 := s0.Args[0] 44586 if x0.Op != OpAMD64MOVBloadidx1 { 44587 break 44588 } 44589 i0 := x0.AuxInt 44590 if x0.Aux != s { 44591 break 44592 } 44593 _ = x0.Args[2] 44594 if idx != x0.Args[0] { 44595 break 44596 } 44597 if p != x0.Args[1] { 44598 break 44599 } 44600 if mem != x0.Args[2] { 44601 break 44602 } 44603 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 44604 break 44605 } 44606 b = mergePoint(b, x0, x1) 44607 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44608 v.reset(OpCopy) 44609 v.AddArg(v0) 44610 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44611 v1.AuxInt = j1 44612 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 44613 v2.AuxInt = 8 44614 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 44615 v3.AuxInt = i0 44616 v3.Aux = s 44617 v3.AddArg(p) 44618 v3.AddArg(idx) 44619 v3.AddArg(mem) 44620 v2.AddArg(v3) 44621 
v1.AddArg(v2) 44622 v0.AddArg(v1) 44623 v0.AddArg(y) 44624 return true 44625 } 44626 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 44627 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 44628 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 44629 for { 44630 _ = v.Args[1] 44631 s0 := v.Args[0] 44632 if s0.Op != OpAMD64SHLQconst { 44633 break 44634 } 44635 j0 := s0.AuxInt 44636 r0 := s0.Args[0] 44637 if r0.Op != OpAMD64ROLWconst { 44638 break 44639 } 44640 if r0.AuxInt != 8 { 44641 break 44642 } 44643 x0 := r0.Args[0] 44644 if x0.Op != OpAMD64MOVWloadidx1 { 44645 break 44646 } 44647 i0 := x0.AuxInt 44648 s := x0.Aux 44649 _ = x0.Args[2] 44650 p := x0.Args[0] 44651 idx := x0.Args[1] 44652 mem := x0.Args[2] 44653 or := v.Args[1] 44654 if or.Op != OpAMD64ORQ { 44655 break 44656 } 44657 _ = or.Args[1] 44658 s1 := or.Args[0] 44659 if s1.Op != OpAMD64SHLQconst { 44660 break 44661 } 44662 j1 := s1.AuxInt 44663 r1 := s1.Args[0] 44664 if r1.Op != OpAMD64ROLWconst { 44665 break 44666 } 44667 if r1.AuxInt != 8 { 44668 break 44669 } 44670 x1 := r1.Args[0] 44671 if x1.Op != OpAMD64MOVWloadidx1 { 44672 break 44673 } 44674 i1 := x1.AuxInt 44675 if x1.Aux != s { 44676 break 44677 } 44678 _ = x1.Args[2] 44679 if p != x1.Args[0] { 44680 break 44681 } 44682 if idx != x1.Args[1] { 44683 break 44684 } 44685 if mem != x1.Args[2] { 44686 break 44687 } 44688 y := or.Args[1] 44689 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 44690 break 44691 } 44692 b = mergePoint(b, x0, x1) 44693 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44694 v.reset(OpCopy) 44695 v.AddArg(v0) 44696 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44697 v1.AuxInt = j1 44698 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 44699 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 44700 v3.AuxInt = i0 44701 v3.Aux = s 44702 v3.AddArg(p) 44703 v3.AddArg(idx) 44704 v3.AddArg(mem) 44705 v2.AddArg(v3) 44706 v1.AddArg(v2) 44707 v0.AddArg(v1) 44708 v0.AddArg(y) 44709 return true 44710 } 44711 return false 44712 } 44713 func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 44714 b := v.Block 44715 _ = b 44716 typ := &b.Func.Config.Types 44717 _ = typ 44718 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 44719 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 44720 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 44721 for { 44722 _ = v.Args[1] 44723 s0 := v.Args[0] 44724 if s0.Op != 
OpAMD64SHLQconst { 44725 break 44726 } 44727 j0 := s0.AuxInt 44728 r0 := s0.Args[0] 44729 if r0.Op != OpAMD64ROLWconst { 44730 break 44731 } 44732 if r0.AuxInt != 8 { 44733 break 44734 } 44735 x0 := r0.Args[0] 44736 if x0.Op != OpAMD64MOVWloadidx1 { 44737 break 44738 } 44739 i0 := x0.AuxInt 44740 s := x0.Aux 44741 _ = x0.Args[2] 44742 idx := x0.Args[0] 44743 p := x0.Args[1] 44744 mem := x0.Args[2] 44745 or := v.Args[1] 44746 if or.Op != OpAMD64ORQ { 44747 break 44748 } 44749 _ = or.Args[1] 44750 s1 := or.Args[0] 44751 if s1.Op != OpAMD64SHLQconst { 44752 break 44753 } 44754 j1 := s1.AuxInt 44755 r1 := s1.Args[0] 44756 if r1.Op != OpAMD64ROLWconst { 44757 break 44758 } 44759 if r1.AuxInt != 8 { 44760 break 44761 } 44762 x1 := r1.Args[0] 44763 if x1.Op != OpAMD64MOVWloadidx1 { 44764 break 44765 } 44766 i1 := x1.AuxInt 44767 if x1.Aux != s { 44768 break 44769 } 44770 _ = x1.Args[2] 44771 if p != x1.Args[0] { 44772 break 44773 } 44774 if idx != x1.Args[1] { 44775 break 44776 } 44777 if mem != x1.Args[2] { 44778 break 44779 } 44780 y := or.Args[1] 44781 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 44782 break 44783 } 44784 b = mergePoint(b, x0, x1) 44785 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44786 v.reset(OpCopy) 44787 v.AddArg(v0) 44788 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44789 v1.AuxInt = j1 44790 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 44791 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 44792 v3.AuxInt = i0 44793 v3.Aux = s 44794 v3.AddArg(p) 44795 v3.AddArg(idx) 44796 v3.AddArg(mem) 44797 v2.AddArg(v3) 44798 v1.AddArg(v2) 44799 v0.AddArg(v1) 44800 v0.AddArg(y) 44801 return true 44802 } 44803 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 44804 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 44805 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 44806 for { 44807 _ = v.Args[1] 44808 s0 := v.Args[0] 44809 if s0.Op != OpAMD64SHLQconst { 44810 break 44811 } 44812 j0 := s0.AuxInt 44813 r0 := s0.Args[0] 44814 if r0.Op != OpAMD64ROLWconst { 44815 break 44816 } 44817 if r0.AuxInt != 8 { 44818 break 44819 } 44820 x0 := r0.Args[0] 44821 if x0.Op != OpAMD64MOVWloadidx1 { 44822 break 44823 } 44824 i0 := x0.AuxInt 44825 s := x0.Aux 44826 _ = x0.Args[2] 44827 p := x0.Args[0] 44828 idx := x0.Args[1] 44829 mem := x0.Args[2] 44830 or := v.Args[1] 44831 if or.Op != OpAMD64ORQ { 44832 break 44833 } 44834 _ = or.Args[1] 44835 s1 := or.Args[0] 44836 if s1.Op != OpAMD64SHLQconst { 44837 break 44838 } 44839 j1 := s1.AuxInt 44840 r1 := s1.Args[0] 44841 if r1.Op != OpAMD64ROLWconst { 44842 break 44843 } 44844 if r1.AuxInt != 8 { 44845 break 44846 } 44847 x1 := r1.Args[0] 44848 if x1.Op != OpAMD64MOVWloadidx1 { 44849 break 44850 } 44851 i1 := x1.AuxInt 44852 if x1.Aux != s { 44853 break 44854 } 44855 _ = x1.Args[2] 44856 if idx != x1.Args[0] { 44857 break 44858 } 44859 
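// Editor's note (not generated): the near-duplicate bodies in this function
// differ only in operand order. ORQ is commutative and MOVWloadidx1 accepts
// its pointer and index in either slot, so what reads as a single rule in
// gen/AMD64.rules appears here expanded into every p/idx and operand-order
// permutation: the generator emits one matcher per permutation rather than
// canonicalizing operand order at match time.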
if p != x1.Args[1] { 44860 break 44861 } 44862 if mem != x1.Args[2] { 44863 break 44864 } 44865 y := or.Args[1] 44866 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 44867 break 44868 } 44869 b = mergePoint(b, x0, x1) 44870 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44871 v.reset(OpCopy) 44872 v.AddArg(v0) 44873 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44874 v1.AuxInt = j1 44875 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 44876 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 44877 v3.AuxInt = i0 44878 v3.Aux = s 44879 v3.AddArg(p) 44880 v3.AddArg(idx) 44881 v3.AddArg(mem) 44882 v2.AddArg(v3) 44883 v1.AddArg(v2) 44884 v0.AddArg(v1) 44885 v0.AddArg(y) 44886 return true 44887 } 44888 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 44889 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 44890 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 44891 for { 44892 _ = v.Args[1] 44893 s0 := v.Args[0] 44894 if s0.Op != OpAMD64SHLQconst { 44895 break 44896 } 44897 j0 := s0.AuxInt 44898 r0 := s0.Args[0] 44899 if r0.Op != OpAMD64ROLWconst { 44900 break 44901 } 44902 if r0.AuxInt != 8 { 44903 break 44904 } 44905 x0 := r0.Args[0] 44906 if x0.Op != OpAMD64MOVWloadidx1 { 44907 break 44908 } 44909 i0 := x0.AuxInt 44910 s := x0.Aux 44911 _ = x0.Args[2] 44912 idx := x0.Args[0] 44913 p := x0.Args[1] 44914 mem := x0.Args[2] 44915 or := v.Args[1] 44916 if or.Op != OpAMD64ORQ { 44917 break 44918 } 44919 _ = or.Args[1] 44920 s1 := or.Args[0] 44921 if s1.Op != OpAMD64SHLQconst { 44922 break 44923 } 44924 j1 := s1.AuxInt 44925 r1 := s1.Args[0] 44926 if r1.Op != OpAMD64ROLWconst { 44927 break 44928 } 44929 if r1.AuxInt != 8 { 44930 break 44931 } 44932 x1 := r1.Args[0] 44933 if x1.Op != OpAMD64MOVWloadidx1 { 44934 break 44935 } 44936 i1 := x1.AuxInt 44937 if x1.Aux != s { 44938 break 44939 } 44940 _ = x1.Args[2] 44941 if idx != x1.Args[0] { 44942 break 44943 } 44944 if p != x1.Args[1] { 44945 break 44946 } 44947 if mem != x1.Args[2] { 44948 break 44949 } 44950 y := or.Args[1] 44951 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 44952 break 44953 } 44954 b = mergePoint(b, x0, x1) 44955 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 44956 v.reset(OpCopy) 44957 v.AddArg(v0) 44958 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 44959 v1.AuxInt = j1 44960 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 44961 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 44962 v3.AuxInt = i0 44963 v3.Aux = s 44964 v3.AddArg(p) 44965 v3.AddArg(idx) 44966 v3.AddArg(mem) 44967 v2.AddArg(v3) 44968 v1.AddArg(v2) 44969 v0.AddArg(v1) 44970 v0.AddArg(y) 44971 
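// Editor's note (not generated): returning true reports that v was rewritten,
// which makes rewriteValueAMD64 (and the surrounding fixed-point rewrite
// loop) run the rule set again until nothing changes. The clobber(...) calls
// in the condition always return true; their side effect is to invalidate the
// matched intermediate values so the old load/shift/or tree is removed even
// though it transiently still appears to have uses.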
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
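// The ORQ rules in this stretch all recognize one idiom under its many
// operand orderings: two byte-swapped 16-bit loads (ROLWconst [8] of a
// MOVWloadidx1) from adjacent offsets, shifted into place and OR'd
// together, collapse into a single 32-bit load plus one BSWAPL. A rough
// Go-level sketch of an expression this chain of rules serves (illustrative
// only, not part of the generated file):
//
//	u := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
//
// Earlier rules merge the byte loads into word loads with ROLW byte swaps;
// the rules here and in the next function then merge those pairs, so the
// whole expression can end up as one MOVL load and one BSWAPL.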
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
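	// The final two ORQ rules fold a load straight into the OR when the
	// loaded value has no other uses (canMergeLoad), so an ORQ plus a
	// separate MOVQload becomes a single memory-operand ORQload; roughly,
	// x |= *p compiles to one "ORQ (mem), reg" instruction. Both argument
	// orders are matched because ORQ is commutative.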
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
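// ORQconst: OR with a single-bit constant of 128 or more becomes a
// bit-test-and-set (BTSQconst), which avoids encoding a wide OR immediate;
// the remaining rules are plain constant folding and identity removal.
// For instance, x | (1 << 32) can be set with BTSQ $32 rather than
// materializing the 64-bit constant first (illustrative; the final
// encoding also depends on later rules and the assembler).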
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ORQconst [c] x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTSQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [c] (ORQconst [d] x))
	// cond:
	// result: (ORQconst [c | d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c | d
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [c] (BTSQconst [d] x))
	// cond:
	// result: (ORQconst [c | 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c | 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconstmodify_0(v *Value) bool {
	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
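// ORQload: the first two rules fold ADDQconst and LEAQ address arithmetic
// into the load's offset and symbol, and the last forwards a value just
// stored by MOVSDstore to the OR through MOVQf2i, moving the bits from the
// floating-point domain to the integer domain without a round trip through
// memory.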
func rewriteValueAMD64_OpAMD64ORQload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ORQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: ( ORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQmodify_0(v *Value) bool {
	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
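// Rotate canonicalization. For each width the rules below turn a rotate by
// a negated count into the opposite-direction rotate and reduce constant
// counts modulo the width. The identity they rely on, sketched for 32 bits
// with math/bits (illustrative, not part of the generated code):
//
//	bits.RotateLeft32(x, -n) == bits.RotateLeft32(x, 32-n)
//
// so (ROLL x (NEGQ y)) and (RORL x y) compute the same value, and only one
// constant-rotate form (ROLxconst) needs to be kept canonical.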
func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
	// match: (ROLB x (NEGQ y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
	// match: (ROLL x (NEGQ y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
	// match: (ROLQ x (NEGQ y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
	// match: (ROLW x (NEGQ y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
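// The RORx rules mirror the ROLx rules above: a right rotate by a negated
// count becomes a left rotate, and a right rotate by a constant c becomes
// a left rotate by (-c) masked to the width, so ROLxconst remains the only
// canonical constant-rotate form.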
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
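// SARB and SARW clamp constant counts with min rather than a plain mask:
// the hardware masks shift counts to 5 bits regardless of operand size, and
// for an 8- or 16-bit arithmetic shift any count at or beyond the width
// just replicates the sign bit, so shifting by min(c&31,7) (or 15 for SARW)
// gives the same result while keeping the count in range for the narrow
// SARxconst form.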
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int8(d)) >> uint64(c)
		return true
	}
	return false
}
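// SARL count simplification: only the low 5 bits of a 32-bit shift count
// reach the hardware (6 bits for SARQ below), so arithmetic on the count
// that cannot change those bits is stripped. Adding a multiple of 32 is a
// no-op, and AND with a mask whose low 5 bits are set is redundant; e.g.
// x >> ((n + 64) & 31) shifts by the same amount as x >> n for a 32-bit x
// (illustrative).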
func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int32(d)) >> uint64(c)
		return true
	}
	return false
}
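// SARQ repeats the SARL count rules at 64 bits, testing c&63 instead of
// c&31, and the SARxconst helpers fold shifts of constants outright,
// e.g. (SARQconst [c] (MOVQconst [d])) becomes a MOVQconst of d>>uint64(c)
// computed at compile time.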
47445 c := v_1_0.AuxInt 47446 y := v_1_0.Args[0] 47447 if !(c&63 == 63) { 47448 break 47449 } 47450 v.reset(OpAMD64SARQ) 47451 v.AddArg(x) 47452 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 47453 v0.AddArg(y) 47454 v.AddArg(v0) 47455 return true 47456 } 47457 // match: (SARQ x (ADDLconst [c] y)) 47458 // cond: c & 63 == 0 47459 // result: (SARQ x y) 47460 for { 47461 _ = v.Args[1] 47462 x := v.Args[0] 47463 v_1 := v.Args[1] 47464 if v_1.Op != OpAMD64ADDLconst { 47465 break 47466 } 47467 c := v_1.AuxInt 47468 y := v_1.Args[0] 47469 if !(c&63 == 0) { 47470 break 47471 } 47472 v.reset(OpAMD64SARQ) 47473 v.AddArg(x) 47474 v.AddArg(y) 47475 return true 47476 } 47477 // match: (SARQ x (NEGL <t> (ADDLconst [c] y))) 47478 // cond: c & 63 == 0 47479 // result: (SARQ x (NEGL <t> y)) 47480 for { 47481 _ = v.Args[1] 47482 x := v.Args[0] 47483 v_1 := v.Args[1] 47484 if v_1.Op != OpAMD64NEGL { 47485 break 47486 } 47487 t := v_1.Type 47488 v_1_0 := v_1.Args[0] 47489 if v_1_0.Op != OpAMD64ADDLconst { 47490 break 47491 } 47492 c := v_1_0.AuxInt 47493 y := v_1_0.Args[0] 47494 if !(c&63 == 0) { 47495 break 47496 } 47497 v.reset(OpAMD64SARQ) 47498 v.AddArg(x) 47499 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 47500 v0.AddArg(y) 47501 v.AddArg(v0) 47502 return true 47503 } 47504 // match: (SARQ x (ANDLconst [c] y)) 47505 // cond: c & 63 == 63 47506 // result: (SARQ x y) 47507 for { 47508 _ = v.Args[1] 47509 x := v.Args[0] 47510 v_1 := v.Args[1] 47511 if v_1.Op != OpAMD64ANDLconst { 47512 break 47513 } 47514 c := v_1.AuxInt 47515 y := v_1.Args[0] 47516 if !(c&63 == 63) { 47517 break 47518 } 47519 v.reset(OpAMD64SARQ) 47520 v.AddArg(x) 47521 v.AddArg(y) 47522 return true 47523 } 47524 // match: (SARQ x (NEGL <t> (ANDLconst [c] y))) 47525 // cond: c & 63 == 63 47526 // result: (SARQ x (NEGL <t> y)) 47527 for { 47528 _ = v.Args[1] 47529 x := v.Args[0] 47530 v_1 := v.Args[1] 47531 if v_1.Op != OpAMD64NEGL { 47532 break 47533 } 47534 t := v_1.Type 47535 v_1_0 := v_1.Args[0] 47536 if v_1_0.Op != OpAMD64ANDLconst { 47537 break 47538 } 47539 c := v_1_0.AuxInt 47540 y := v_1_0.Args[0] 47541 if !(c&63 == 63) { 47542 break 47543 } 47544 v.reset(OpAMD64SARQ) 47545 v.AddArg(x) 47546 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 47547 v0.AddArg(y) 47548 v.AddArg(v0) 47549 return true 47550 } 47551 return false 47552 } 47553 func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool { 47554 // match: (SARQconst x [0]) 47555 // cond: 47556 // result: x 47557 for { 47558 if v.AuxInt != 0 { 47559 break 47560 } 47561 x := v.Args[0] 47562 v.reset(OpCopy) 47563 v.Type = x.Type 47564 v.AddArg(x) 47565 return true 47566 } 47567 // match: (SARQconst [c] (MOVQconst [d])) 47568 // cond: 47569 // result: (MOVQconst [d>>uint64(c)]) 47570 for { 47571 c := v.AuxInt 47572 v_0 := v.Args[0] 47573 if v_0.Op != OpAMD64MOVQconst { 47574 break 47575 } 47576 d := v_0.AuxInt 47577 v.reset(OpAMD64MOVQconst) 47578 v.AuxInt = d >> uint64(c) 47579 return true 47580 } 47581 return false 47582 } 47583 func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool { 47584 // match: (SARW x (MOVQconst [c])) 47585 // cond: 47586 // result: (SARWconst [min(c&31,15)] x) 47587 for { 47588 _ = v.Args[1] 47589 x := v.Args[0] 47590 v_1 := v.Args[1] 47591 if v_1.Op != OpAMD64MOVQconst { 47592 break 47593 } 47594 c := v_1.AuxInt 47595 v.reset(OpAMD64SARWconst) 47596 v.AuxInt = min(c&31, 15) 47597 v.AddArg(x) 47598 return true 47599 } 47600 // match: (SARW x (MOVLconst [c])) 47601 // cond: 47602 // result: (SARWconst [min(c&31,15)] x) 47603 for { 47604 _ = v.Args[1] 47605 x := v.Args[0] 47606 
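// SARW only folds constant counts in this function, clamped to
// min(c&31,15): an arithmetic right shift of a 16-bit value by 15 already
// fills every bit position with the sign, so any larger masked count is
// equivalent to shifting by 15.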
v_1 := v.Args[1] 47607 if v_1.Op != OpAMD64MOVLconst { 47608 break 47609 } 47610 c := v_1.AuxInt 47611 v.reset(OpAMD64SARWconst) 47612 v.AuxInt = min(c&31, 15) 47613 v.AddArg(x) 47614 return true 47615 } 47616 return false 47617 } 47618 func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool { 47619 // match: (SARWconst x [0]) 47620 // cond: 47621 // result: x 47622 for { 47623 if v.AuxInt != 0 { 47624 break 47625 } 47626 x := v.Args[0] 47627 v.reset(OpCopy) 47628 v.Type = x.Type 47629 v.AddArg(x) 47630 return true 47631 } 47632 // match: (SARWconst [c] (MOVQconst [d])) 47633 // cond: 47634 // result: (MOVQconst [int64(int16(d))>>uint64(c)]) 47635 for { 47636 c := v.AuxInt 47637 v_0 := v.Args[0] 47638 if v_0.Op != OpAMD64MOVQconst { 47639 break 47640 } 47641 d := v_0.AuxInt 47642 v.reset(OpAMD64MOVQconst) 47643 v.AuxInt = int64(int16(d)) >> uint64(c) 47644 return true 47645 } 47646 return false 47647 } 47648 func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { 47649 // match: (SBBLcarrymask (FlagEQ)) 47650 // cond: 47651 // result: (MOVLconst [0]) 47652 for { 47653 v_0 := v.Args[0] 47654 if v_0.Op != OpAMD64FlagEQ { 47655 break 47656 } 47657 v.reset(OpAMD64MOVLconst) 47658 v.AuxInt = 0 47659 return true 47660 } 47661 // match: (SBBLcarrymask (FlagLT_ULT)) 47662 // cond: 47663 // result: (MOVLconst [-1]) 47664 for { 47665 v_0 := v.Args[0] 47666 if v_0.Op != OpAMD64FlagLT_ULT { 47667 break 47668 } 47669 v.reset(OpAMD64MOVLconst) 47670 v.AuxInt = -1 47671 return true 47672 } 47673 // match: (SBBLcarrymask (FlagLT_UGT)) 47674 // cond: 47675 // result: (MOVLconst [0]) 47676 for { 47677 v_0 := v.Args[0] 47678 if v_0.Op != OpAMD64FlagLT_UGT { 47679 break 47680 } 47681 v.reset(OpAMD64MOVLconst) 47682 v.AuxInt = 0 47683 return true 47684 } 47685 // match: (SBBLcarrymask (FlagGT_ULT)) 47686 // cond: 47687 // result: (MOVLconst [-1]) 47688 for { 47689 v_0 := v.Args[0] 47690 if v_0.Op != OpAMD64FlagGT_ULT { 47691 break 47692 } 47693 v.reset(OpAMD64MOVLconst) 47694 v.AuxInt = -1 47695 return true 47696 } 47697 // match: (SBBLcarrymask (FlagGT_UGT)) 47698 // cond: 47699 // result: (MOVLconst [0]) 47700 for { 47701 v_0 := v.Args[0] 47702 if v_0.Op != OpAMD64FlagGT_UGT { 47703 break 47704 } 47705 v.reset(OpAMD64MOVLconst) 47706 v.AuxInt = 0 47707 return true 47708 } 47709 return false 47710 } 47711 func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { 47712 // match: (SBBQcarrymask (FlagEQ)) 47713 // cond: 47714 // result: (MOVQconst [0]) 47715 for { 47716 v_0 := v.Args[0] 47717 if v_0.Op != OpAMD64FlagEQ { 47718 break 47719 } 47720 v.reset(OpAMD64MOVQconst) 47721 v.AuxInt = 0 47722 return true 47723 } 47724 // match: (SBBQcarrymask (FlagLT_ULT)) 47725 // cond: 47726 // result: (MOVQconst [-1]) 47727 for { 47728 v_0 := v.Args[0] 47729 if v_0.Op != OpAMD64FlagLT_ULT { 47730 break 47731 } 47732 v.reset(OpAMD64MOVQconst) 47733 v.AuxInt = -1 47734 return true 47735 } 47736 // match: (SBBQcarrymask (FlagLT_UGT)) 47737 // cond: 47738 // result: (MOVQconst [0]) 47739 for { 47740 v_0 := v.Args[0] 47741 if v_0.Op != OpAMD64FlagLT_UGT { 47742 break 47743 } 47744 v.reset(OpAMD64MOVQconst) 47745 v.AuxInt = 0 47746 return true 47747 } 47748 // match: (SBBQcarrymask (FlagGT_ULT)) 47749 // cond: 47750 // result: (MOVQconst [-1]) 47751 for { 47752 v_0 := v.Args[0] 47753 if v_0.Op != OpAMD64FlagGT_ULT { 47754 break 47755 } 47756 v.reset(OpAMD64MOVQconst) 47757 v.AuxInt = -1 47758 return true 47759 } 47760 // match: (SBBQcarrymask (FlagGT_UGT)) 47761 // cond: 47762 // result: (MOVQconst [0]) 47763 for { 
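// SBBcarrymask is x-x-CF, so it evaluates to 0 or -1 depending only on the
// carry flag. With a constant flags input (FlagEQ, FlagLT_ULT, ...) the
// carry is known, and the mask folds to a MOVLconst/MOVQconst of 0 or -1.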
47764 v_0 := v.Args[0] 47765 if v_0.Op != OpAMD64FlagGT_UGT { 47766 break 47767 } 47768 v.reset(OpAMD64MOVQconst) 47769 v.AuxInt = 0 47770 return true 47771 } 47772 return false 47773 } 47774 func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { 47775 // match: (SETA (InvertFlags x)) 47776 // cond: 47777 // result: (SETB x) 47778 for { 47779 v_0 := v.Args[0] 47780 if v_0.Op != OpAMD64InvertFlags { 47781 break 47782 } 47783 x := v_0.Args[0] 47784 v.reset(OpAMD64SETB) 47785 v.AddArg(x) 47786 return true 47787 } 47788 // match: (SETA (FlagEQ)) 47789 // cond: 47790 // result: (MOVLconst [0]) 47791 for { 47792 v_0 := v.Args[0] 47793 if v_0.Op != OpAMD64FlagEQ { 47794 break 47795 } 47796 v.reset(OpAMD64MOVLconst) 47797 v.AuxInt = 0 47798 return true 47799 } 47800 // match: (SETA (FlagLT_ULT)) 47801 // cond: 47802 // result: (MOVLconst [0]) 47803 for { 47804 v_0 := v.Args[0] 47805 if v_0.Op != OpAMD64FlagLT_ULT { 47806 break 47807 } 47808 v.reset(OpAMD64MOVLconst) 47809 v.AuxInt = 0 47810 return true 47811 } 47812 // match: (SETA (FlagLT_UGT)) 47813 // cond: 47814 // result: (MOVLconst [1]) 47815 for { 47816 v_0 := v.Args[0] 47817 if v_0.Op != OpAMD64FlagLT_UGT { 47818 break 47819 } 47820 v.reset(OpAMD64MOVLconst) 47821 v.AuxInt = 1 47822 return true 47823 } 47824 // match: (SETA (FlagGT_ULT)) 47825 // cond: 47826 // result: (MOVLconst [0]) 47827 for { 47828 v_0 := v.Args[0] 47829 if v_0.Op != OpAMD64FlagGT_ULT { 47830 break 47831 } 47832 v.reset(OpAMD64MOVLconst) 47833 v.AuxInt = 0 47834 return true 47835 } 47836 // match: (SETA (FlagGT_UGT)) 47837 // cond: 47838 // result: (MOVLconst [1]) 47839 for { 47840 v_0 := v.Args[0] 47841 if v_0.Op != OpAMD64FlagGT_UGT { 47842 break 47843 } 47844 v.reset(OpAMD64MOVLconst) 47845 v.AuxInt = 1 47846 return true 47847 } 47848 return false 47849 } 47850 func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { 47851 // match: (SETAE (InvertFlags x)) 47852 // cond: 47853 // result: (SETBE x) 47854 for { 47855 v_0 := v.Args[0] 47856 if v_0.Op != OpAMD64InvertFlags { 47857 break 47858 } 47859 x := v_0.Args[0] 47860 v.reset(OpAMD64SETBE) 47861 v.AddArg(x) 47862 return true 47863 } 47864 // match: (SETAE (FlagEQ)) 47865 // cond: 47866 // result: (MOVLconst [1]) 47867 for { 47868 v_0 := v.Args[0] 47869 if v_0.Op != OpAMD64FlagEQ { 47870 break 47871 } 47872 v.reset(OpAMD64MOVLconst) 47873 v.AuxInt = 1 47874 return true 47875 } 47876 // match: (SETAE (FlagLT_ULT)) 47877 // cond: 47878 // result: (MOVLconst [0]) 47879 for { 47880 v_0 := v.Args[0] 47881 if v_0.Op != OpAMD64FlagLT_ULT { 47882 break 47883 } 47884 v.reset(OpAMD64MOVLconst) 47885 v.AuxInt = 0 47886 return true 47887 } 47888 // match: (SETAE (FlagLT_UGT)) 47889 // cond: 47890 // result: (MOVLconst [1]) 47891 for { 47892 v_0 := v.Args[0] 47893 if v_0.Op != OpAMD64FlagLT_UGT { 47894 break 47895 } 47896 v.reset(OpAMD64MOVLconst) 47897 v.AuxInt = 1 47898 return true 47899 } 47900 // match: (SETAE (FlagGT_ULT)) 47901 // cond: 47902 // result: (MOVLconst [0]) 47903 for { 47904 v_0 := v.Args[0] 47905 if v_0.Op != OpAMD64FlagGT_ULT { 47906 break 47907 } 47908 v.reset(OpAMD64MOVLconst) 47909 v.AuxInt = 0 47910 return true 47911 } 47912 // match: (SETAE (FlagGT_UGT)) 47913 // cond: 47914 // result: (MOVLconst [1]) 47915 for { 47916 v_0 := v.Args[0] 47917 if v_0.Op != OpAMD64FlagGT_UGT { 47918 break 47919 } 47920 v.reset(OpAMD64MOVLconst) 47921 v.AuxInt = 1 47922 return true 47923 } 47924 return false 47925 } 47926 func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool { 47927 b := v.Block 47928 _ = b 47929 // match: 
(SETAEstore [off] {sym} ptr (InvertFlags x) mem) 47930 // cond: 47931 // result: (SETBEstore [off] {sym} ptr x mem) 47932 for { 47933 off := v.AuxInt 47934 sym := v.Aux 47935 _ = v.Args[2] 47936 ptr := v.Args[0] 47937 v_1 := v.Args[1] 47938 if v_1.Op != OpAMD64InvertFlags { 47939 break 47940 } 47941 x := v_1.Args[0] 47942 mem := v.Args[2] 47943 v.reset(OpAMD64SETBEstore) 47944 v.AuxInt = off 47945 v.Aux = sym 47946 v.AddArg(ptr) 47947 v.AddArg(x) 47948 v.AddArg(mem) 47949 return true 47950 } 47951 // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem) 47952 // cond: is32Bit(off1+off2) 47953 // result: (SETAEstore [off1+off2] {sym} base val mem) 47954 for { 47955 off1 := v.AuxInt 47956 sym := v.Aux 47957 _ = v.Args[2] 47958 v_0 := v.Args[0] 47959 if v_0.Op != OpAMD64ADDQconst { 47960 break 47961 } 47962 off2 := v_0.AuxInt 47963 base := v_0.Args[0] 47964 val := v.Args[1] 47965 mem := v.Args[2] 47966 if !(is32Bit(off1 + off2)) { 47967 break 47968 } 47969 v.reset(OpAMD64SETAEstore) 47970 v.AuxInt = off1 + off2 47971 v.Aux = sym 47972 v.AddArg(base) 47973 v.AddArg(val) 47974 v.AddArg(mem) 47975 return true 47976 } 47977 // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 47978 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 47979 // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 47980 for { 47981 off1 := v.AuxInt 47982 sym1 := v.Aux 47983 _ = v.Args[2] 47984 v_0 := v.Args[0] 47985 if v_0.Op != OpAMD64LEAQ { 47986 break 47987 } 47988 off2 := v_0.AuxInt 47989 sym2 := v_0.Aux 47990 base := v_0.Args[0] 47991 val := v.Args[1] 47992 mem := v.Args[2] 47993 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 47994 break 47995 } 47996 v.reset(OpAMD64SETAEstore) 47997 v.AuxInt = off1 + off2 47998 v.Aux = mergeSym(sym1, sym2) 47999 v.AddArg(base) 48000 v.AddArg(val) 48001 v.AddArg(mem) 48002 return true 48003 } 48004 // match: (SETAEstore [off] {sym} ptr x:(FlagEQ) mem) 48005 // cond: 48006 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48007 for { 48008 off := v.AuxInt 48009 sym := v.Aux 48010 _ = v.Args[2] 48011 ptr := v.Args[0] 48012 x := v.Args[1] 48013 if x.Op != OpAMD64FlagEQ { 48014 break 48015 } 48016 mem := v.Args[2] 48017 v.reset(OpAMD64MOVBstore) 48018 v.AuxInt = off 48019 v.Aux = sym 48020 v.AddArg(ptr) 48021 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48022 v0.AuxInt = 1 48023 v.AddArg(v0) 48024 v.AddArg(mem) 48025 return true 48026 } 48027 // match: (SETAEstore [off] {sym} ptr x:(FlagLT_ULT) mem) 48028 // cond: 48029 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48030 for { 48031 off := v.AuxInt 48032 sym := v.Aux 48033 _ = v.Args[2] 48034 ptr := v.Args[0] 48035 x := v.Args[1] 48036 if x.Op != OpAMD64FlagLT_ULT { 48037 break 48038 } 48039 mem := v.Args[2] 48040 v.reset(OpAMD64MOVBstore) 48041 v.AuxInt = off 48042 v.Aux = sym 48043 v.AddArg(ptr) 48044 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48045 v0.AuxInt = 0 48046 v.AddArg(v0) 48047 v.AddArg(mem) 48048 return true 48049 } 48050 // match: (SETAEstore [off] {sym} ptr x:(FlagLT_UGT) mem) 48051 // cond: 48052 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48053 for { 48054 off := v.AuxInt 48055 sym := v.Aux 48056 _ = v.Args[2] 48057 ptr := v.Args[0] 48058 x := v.Args[1] 48059 if x.Op != OpAMD64FlagLT_UGT { 48060 break 48061 } 48062 mem := v.Args[2] 48063 v.reset(OpAMD64MOVBstore) 48064 v.AuxInt = off 48065 v.Aux = sym 48066 v.AddArg(ptr) 48067 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48068 
v0.AuxInt = 1 48069 v.AddArg(v0) 48070 v.AddArg(mem) 48071 return true 48072 } 48073 // match: (SETAEstore [off] {sym} ptr x:(FlagGT_ULT) mem) 48074 // cond: 48075 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48076 for { 48077 off := v.AuxInt 48078 sym := v.Aux 48079 _ = v.Args[2] 48080 ptr := v.Args[0] 48081 x := v.Args[1] 48082 if x.Op != OpAMD64FlagGT_ULT { 48083 break 48084 } 48085 mem := v.Args[2] 48086 v.reset(OpAMD64MOVBstore) 48087 v.AuxInt = off 48088 v.Aux = sym 48089 v.AddArg(ptr) 48090 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48091 v0.AuxInt = 0 48092 v.AddArg(v0) 48093 v.AddArg(mem) 48094 return true 48095 } 48096 // match: (SETAEstore [off] {sym} ptr x:(FlagGT_UGT) mem) 48097 // cond: 48098 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48099 for { 48100 off := v.AuxInt 48101 sym := v.Aux 48102 _ = v.Args[2] 48103 ptr := v.Args[0] 48104 x := v.Args[1] 48105 if x.Op != OpAMD64FlagGT_UGT { 48106 break 48107 } 48108 mem := v.Args[2] 48109 v.reset(OpAMD64MOVBstore) 48110 v.AuxInt = off 48111 v.Aux = sym 48112 v.AddArg(ptr) 48113 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48114 v0.AuxInt = 1 48115 v.AddArg(v0) 48116 v.AddArg(mem) 48117 return true 48118 } 48119 return false 48120 } 48121 func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool { 48122 b := v.Block 48123 _ = b 48124 // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem) 48125 // cond: 48126 // result: (SETBstore [off] {sym} ptr x mem) 48127 for { 48128 off := v.AuxInt 48129 sym := v.Aux 48130 _ = v.Args[2] 48131 ptr := v.Args[0] 48132 v_1 := v.Args[1] 48133 if v_1.Op != OpAMD64InvertFlags { 48134 break 48135 } 48136 x := v_1.Args[0] 48137 mem := v.Args[2] 48138 v.reset(OpAMD64SETBstore) 48139 v.AuxInt = off 48140 v.Aux = sym 48141 v.AddArg(ptr) 48142 v.AddArg(x) 48143 v.AddArg(mem) 48144 return true 48145 } 48146 // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem) 48147 // cond: is32Bit(off1+off2) 48148 // result: (SETAstore [off1+off2] {sym} base val mem) 48149 for { 48150 off1 := v.AuxInt 48151 sym := v.Aux 48152 _ = v.Args[2] 48153 v_0 := v.Args[0] 48154 if v_0.Op != OpAMD64ADDQconst { 48155 break 48156 } 48157 off2 := v_0.AuxInt 48158 base := v_0.Args[0] 48159 val := v.Args[1] 48160 mem := v.Args[2] 48161 if !(is32Bit(off1 + off2)) { 48162 break 48163 } 48164 v.reset(OpAMD64SETAstore) 48165 v.AuxInt = off1 + off2 48166 v.Aux = sym 48167 v.AddArg(base) 48168 v.AddArg(val) 48169 v.AddArg(mem) 48170 return true 48171 } 48172 // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 48173 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 48174 // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 48175 for { 48176 off1 := v.AuxInt 48177 sym1 := v.Aux 48178 _ = v.Args[2] 48179 v_0 := v.Args[0] 48180 if v_0.Op != OpAMD64LEAQ { 48181 break 48182 } 48183 off2 := v_0.AuxInt 48184 sym2 := v_0.Aux 48185 base := v_0.Args[0] 48186 val := v.Args[1] 48187 mem := v.Args[2] 48188 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 48189 break 48190 } 48191 v.reset(OpAMD64SETAstore) 48192 v.AuxInt = off1 + off2 48193 v.Aux = mergeSym(sym1, sym2) 48194 v.AddArg(base) 48195 v.AddArg(val) 48196 v.AddArg(mem) 48197 return true 48198 } 48199 // match: (SETAstore [off] {sym} ptr x:(FlagEQ) mem) 48200 // cond: 48201 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48202 for { 48203 off := v.AuxInt 48204 sym := v.Aux 48205 _ = v.Args[2] 48206 ptr := v.Args[0] 48207 x := v.Args[1] 48208 if x.Op != 
OpAMD64FlagEQ { 48209 break 48210 } 48211 mem := v.Args[2] 48212 v.reset(OpAMD64MOVBstore) 48213 v.AuxInt = off 48214 v.Aux = sym 48215 v.AddArg(ptr) 48216 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48217 v0.AuxInt = 0 48218 v.AddArg(v0) 48219 v.AddArg(mem) 48220 return true 48221 } 48222 // match: (SETAstore [off] {sym} ptr x:(FlagLT_ULT) mem) 48223 // cond: 48224 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48225 for { 48226 off := v.AuxInt 48227 sym := v.Aux 48228 _ = v.Args[2] 48229 ptr := v.Args[0] 48230 x := v.Args[1] 48231 if x.Op != OpAMD64FlagLT_ULT { 48232 break 48233 } 48234 mem := v.Args[2] 48235 v.reset(OpAMD64MOVBstore) 48236 v.AuxInt = off 48237 v.Aux = sym 48238 v.AddArg(ptr) 48239 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48240 v0.AuxInt = 0 48241 v.AddArg(v0) 48242 v.AddArg(mem) 48243 return true 48244 } 48245 // match: (SETAstore [off] {sym} ptr x:(FlagLT_UGT) mem) 48246 // cond: 48247 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48248 for { 48249 off := v.AuxInt 48250 sym := v.Aux 48251 _ = v.Args[2] 48252 ptr := v.Args[0] 48253 x := v.Args[1] 48254 if x.Op != OpAMD64FlagLT_UGT { 48255 break 48256 } 48257 mem := v.Args[2] 48258 v.reset(OpAMD64MOVBstore) 48259 v.AuxInt = off 48260 v.Aux = sym 48261 v.AddArg(ptr) 48262 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48263 v0.AuxInt = 1 48264 v.AddArg(v0) 48265 v.AddArg(mem) 48266 return true 48267 } 48268 // match: (SETAstore [off] {sym} ptr x:(FlagGT_ULT) mem) 48269 // cond: 48270 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48271 for { 48272 off := v.AuxInt 48273 sym := v.Aux 48274 _ = v.Args[2] 48275 ptr := v.Args[0] 48276 x := v.Args[1] 48277 if x.Op != OpAMD64FlagGT_ULT { 48278 break 48279 } 48280 mem := v.Args[2] 48281 v.reset(OpAMD64MOVBstore) 48282 v.AuxInt = off 48283 v.Aux = sym 48284 v.AddArg(ptr) 48285 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48286 v0.AuxInt = 0 48287 v.AddArg(v0) 48288 v.AddArg(mem) 48289 return true 48290 } 48291 // match: (SETAstore [off] {sym} ptr x:(FlagGT_UGT) mem) 48292 // cond: 48293 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48294 for { 48295 off := v.AuxInt 48296 sym := v.Aux 48297 _ = v.Args[2] 48298 ptr := v.Args[0] 48299 x := v.Args[1] 48300 if x.Op != OpAMD64FlagGT_UGT { 48301 break 48302 } 48303 mem := v.Args[2] 48304 v.reset(OpAMD64MOVBstore) 48305 v.AuxInt = off 48306 v.Aux = sym 48307 v.AddArg(ptr) 48308 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48309 v0.AuxInt = 1 48310 v.AddArg(v0) 48311 v.AddArg(mem) 48312 return true 48313 } 48314 return false 48315 } 48316 func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { 48317 // match: (SETB (InvertFlags x)) 48318 // cond: 48319 // result: (SETA x) 48320 for { 48321 v_0 := v.Args[0] 48322 if v_0.Op != OpAMD64InvertFlags { 48323 break 48324 } 48325 x := v_0.Args[0] 48326 v.reset(OpAMD64SETA) 48327 v.AddArg(x) 48328 return true 48329 } 48330 // match: (SETB (FlagEQ)) 48331 // cond: 48332 // result: (MOVLconst [0]) 48333 for { 48334 v_0 := v.Args[0] 48335 if v_0.Op != OpAMD64FlagEQ { 48336 break 48337 } 48338 v.reset(OpAMD64MOVLconst) 48339 v.AuxInt = 0 48340 return true 48341 } 48342 // match: (SETB (FlagLT_ULT)) 48343 // cond: 48344 // result: (MOVLconst [1]) 48345 for { 48346 v_0 := v.Args[0] 48347 if v_0.Op != OpAMD64FlagLT_ULT { 48348 break 48349 } 48350 v.reset(OpAMD64MOVLconst) 48351 v.AuxInt = 1 48352 return true 48353 } 48354 // match: (SETB (FlagLT_UGT)) 48355 // cond: 48356 // result: 
(MOVLconst [0]) 48357 for { 48358 v_0 := v.Args[0] 48359 if v_0.Op != OpAMD64FlagLT_UGT { 48360 break 48361 } 48362 v.reset(OpAMD64MOVLconst) 48363 v.AuxInt = 0 48364 return true 48365 } 48366 // match: (SETB (FlagGT_ULT)) 48367 // cond: 48368 // result: (MOVLconst [1]) 48369 for { 48370 v_0 := v.Args[0] 48371 if v_0.Op != OpAMD64FlagGT_ULT { 48372 break 48373 } 48374 v.reset(OpAMD64MOVLconst) 48375 v.AuxInt = 1 48376 return true 48377 } 48378 // match: (SETB (FlagGT_UGT)) 48379 // cond: 48380 // result: (MOVLconst [0]) 48381 for { 48382 v_0 := v.Args[0] 48383 if v_0.Op != OpAMD64FlagGT_UGT { 48384 break 48385 } 48386 v.reset(OpAMD64MOVLconst) 48387 v.AuxInt = 0 48388 return true 48389 } 48390 return false 48391 } 48392 func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { 48393 // match: (SETBE (InvertFlags x)) 48394 // cond: 48395 // result: (SETAE x) 48396 for { 48397 v_0 := v.Args[0] 48398 if v_0.Op != OpAMD64InvertFlags { 48399 break 48400 } 48401 x := v_0.Args[0] 48402 v.reset(OpAMD64SETAE) 48403 v.AddArg(x) 48404 return true 48405 } 48406 // match: (SETBE (FlagEQ)) 48407 // cond: 48408 // result: (MOVLconst [1]) 48409 for { 48410 v_0 := v.Args[0] 48411 if v_0.Op != OpAMD64FlagEQ { 48412 break 48413 } 48414 v.reset(OpAMD64MOVLconst) 48415 v.AuxInt = 1 48416 return true 48417 } 48418 // match: (SETBE (FlagLT_ULT)) 48419 // cond: 48420 // result: (MOVLconst [1]) 48421 for { 48422 v_0 := v.Args[0] 48423 if v_0.Op != OpAMD64FlagLT_ULT { 48424 break 48425 } 48426 v.reset(OpAMD64MOVLconst) 48427 v.AuxInt = 1 48428 return true 48429 } 48430 // match: (SETBE (FlagLT_UGT)) 48431 // cond: 48432 // result: (MOVLconst [0]) 48433 for { 48434 v_0 := v.Args[0] 48435 if v_0.Op != OpAMD64FlagLT_UGT { 48436 break 48437 } 48438 v.reset(OpAMD64MOVLconst) 48439 v.AuxInt = 0 48440 return true 48441 } 48442 // match: (SETBE (FlagGT_ULT)) 48443 // cond: 48444 // result: (MOVLconst [1]) 48445 for { 48446 v_0 := v.Args[0] 48447 if v_0.Op != OpAMD64FlagGT_ULT { 48448 break 48449 } 48450 v.reset(OpAMD64MOVLconst) 48451 v.AuxInt = 1 48452 return true 48453 } 48454 // match: (SETBE (FlagGT_UGT)) 48455 // cond: 48456 // result: (MOVLconst [0]) 48457 for { 48458 v_0 := v.Args[0] 48459 if v_0.Op != OpAMD64FlagGT_UGT { 48460 break 48461 } 48462 v.reset(OpAMD64MOVLconst) 48463 v.AuxInt = 0 48464 return true 48465 } 48466 return false 48467 } 48468 func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool { 48469 b := v.Block 48470 _ = b 48471 // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem) 48472 // cond: 48473 // result: (SETAEstore [off] {sym} ptr x mem) 48474 for { 48475 off := v.AuxInt 48476 sym := v.Aux 48477 _ = v.Args[2] 48478 ptr := v.Args[0] 48479 v_1 := v.Args[1] 48480 if v_1.Op != OpAMD64InvertFlags { 48481 break 48482 } 48483 x := v_1.Args[0] 48484 mem := v.Args[2] 48485 v.reset(OpAMD64SETAEstore) 48486 v.AuxInt = off 48487 v.Aux = sym 48488 v.AddArg(ptr) 48489 v.AddArg(x) 48490 v.AddArg(mem) 48491 return true 48492 } 48493 // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem) 48494 // cond: is32Bit(off1+off2) 48495 // result: (SETBEstore [off1+off2] {sym} base val mem) 48496 for { 48497 off1 := v.AuxInt 48498 sym := v.Aux 48499 _ = v.Args[2] 48500 v_0 := v.Args[0] 48501 if v_0.Op != OpAMD64ADDQconst { 48502 break 48503 } 48504 off2 := v_0.AuxInt 48505 base := v_0.Args[0] 48506 val := v.Args[1] 48507 mem := v.Args[2] 48508 if !(is32Bit(off1 + off2)) { 48509 break 48510 } 48511 v.reset(OpAMD64SETBEstore) 48512 v.AuxInt = off1 + off2 48513 v.Aux = sym 48514 v.AddArg(base) 48515 
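// The ADDQconst and LEAQ rules for the SETxxstore ops fold a constant
// pointer offset (and, for LEAQ, a mergeable symbol) into the store's
// AuxInt/Aux; the is32Bit guard keeps the combined displacement inside a
// signed 32-bit addressing field.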
v.AddArg(val) 48516 v.AddArg(mem) 48517 return true 48518 } 48519 // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 48520 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 48521 // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 48522 for { 48523 off1 := v.AuxInt 48524 sym1 := v.Aux 48525 _ = v.Args[2] 48526 v_0 := v.Args[0] 48527 if v_0.Op != OpAMD64LEAQ { 48528 break 48529 } 48530 off2 := v_0.AuxInt 48531 sym2 := v_0.Aux 48532 base := v_0.Args[0] 48533 val := v.Args[1] 48534 mem := v.Args[2] 48535 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 48536 break 48537 } 48538 v.reset(OpAMD64SETBEstore) 48539 v.AuxInt = off1 + off2 48540 v.Aux = mergeSym(sym1, sym2) 48541 v.AddArg(base) 48542 v.AddArg(val) 48543 v.AddArg(mem) 48544 return true 48545 } 48546 // match: (SETBEstore [off] {sym} ptr x:(FlagEQ) mem) 48547 // cond: 48548 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48549 for { 48550 off := v.AuxInt 48551 sym := v.Aux 48552 _ = v.Args[2] 48553 ptr := v.Args[0] 48554 x := v.Args[1] 48555 if x.Op != OpAMD64FlagEQ { 48556 break 48557 } 48558 mem := v.Args[2] 48559 v.reset(OpAMD64MOVBstore) 48560 v.AuxInt = off 48561 v.Aux = sym 48562 v.AddArg(ptr) 48563 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48564 v0.AuxInt = 1 48565 v.AddArg(v0) 48566 v.AddArg(mem) 48567 return true 48568 } 48569 // match: (SETBEstore [off] {sym} ptr x:(FlagLT_ULT) mem) 48570 // cond: 48571 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48572 for { 48573 off := v.AuxInt 48574 sym := v.Aux 48575 _ = v.Args[2] 48576 ptr := v.Args[0] 48577 x := v.Args[1] 48578 if x.Op != OpAMD64FlagLT_ULT { 48579 break 48580 } 48581 mem := v.Args[2] 48582 v.reset(OpAMD64MOVBstore) 48583 v.AuxInt = off 48584 v.Aux = sym 48585 v.AddArg(ptr) 48586 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48587 v0.AuxInt = 1 48588 v.AddArg(v0) 48589 v.AddArg(mem) 48590 return true 48591 } 48592 // match: (SETBEstore [off] {sym} ptr x:(FlagLT_UGT) mem) 48593 // cond: 48594 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48595 for { 48596 off := v.AuxInt 48597 sym := v.Aux 48598 _ = v.Args[2] 48599 ptr := v.Args[0] 48600 x := v.Args[1] 48601 if x.Op != OpAMD64FlagLT_UGT { 48602 break 48603 } 48604 mem := v.Args[2] 48605 v.reset(OpAMD64MOVBstore) 48606 v.AuxInt = off 48607 v.Aux = sym 48608 v.AddArg(ptr) 48609 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48610 v0.AuxInt = 0 48611 v.AddArg(v0) 48612 v.AddArg(mem) 48613 return true 48614 } 48615 // match: (SETBEstore [off] {sym} ptr x:(FlagGT_ULT) mem) 48616 // cond: 48617 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48618 for { 48619 off := v.AuxInt 48620 sym := v.Aux 48621 _ = v.Args[2] 48622 ptr := v.Args[0] 48623 x := v.Args[1] 48624 if x.Op != OpAMD64FlagGT_ULT { 48625 break 48626 } 48627 mem := v.Args[2] 48628 v.reset(OpAMD64MOVBstore) 48629 v.AuxInt = off 48630 v.Aux = sym 48631 v.AddArg(ptr) 48632 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48633 v0.AuxInt = 1 48634 v.AddArg(v0) 48635 v.AddArg(mem) 48636 return true 48637 } 48638 // match: (SETBEstore [off] {sym} ptr x:(FlagGT_UGT) mem) 48639 // cond: 48640 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48641 for { 48642 off := v.AuxInt 48643 sym := v.Aux 48644 _ = v.Args[2] 48645 ptr := v.Args[0] 48646 x := v.Args[1] 48647 if x.Op != OpAMD64FlagGT_UGT { 48648 break 48649 } 48650 mem := v.Args[2] 48651 v.reset(OpAMD64MOVBstore) 48652 v.AuxInt = off 48653 v.Aux = sym 
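// InvertFlags marks a comparison whose operands were swapped, so the
// unsigned conditions trade places: SETA<->SETB, SETAE<->SETBE, and the
// same for the store forms. Once the flags are a known constant, SETxx
// folds to MOVLconst 0/1 and SETxxstore to a MOVBstore of that byte.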
48654 v.AddArg(ptr) 48655 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48656 v0.AuxInt = 0 48657 v.AddArg(v0) 48658 v.AddArg(mem) 48659 return true 48660 } 48661 return false 48662 } 48663 func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool { 48664 b := v.Block 48665 _ = b 48666 // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem) 48667 // cond: 48668 // result: (SETAstore [off] {sym} ptr x mem) 48669 for { 48670 off := v.AuxInt 48671 sym := v.Aux 48672 _ = v.Args[2] 48673 ptr := v.Args[0] 48674 v_1 := v.Args[1] 48675 if v_1.Op != OpAMD64InvertFlags { 48676 break 48677 } 48678 x := v_1.Args[0] 48679 mem := v.Args[2] 48680 v.reset(OpAMD64SETAstore) 48681 v.AuxInt = off 48682 v.Aux = sym 48683 v.AddArg(ptr) 48684 v.AddArg(x) 48685 v.AddArg(mem) 48686 return true 48687 } 48688 // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem) 48689 // cond: is32Bit(off1+off2) 48690 // result: (SETBstore [off1+off2] {sym} base val mem) 48691 for { 48692 off1 := v.AuxInt 48693 sym := v.Aux 48694 _ = v.Args[2] 48695 v_0 := v.Args[0] 48696 if v_0.Op != OpAMD64ADDQconst { 48697 break 48698 } 48699 off2 := v_0.AuxInt 48700 base := v_0.Args[0] 48701 val := v.Args[1] 48702 mem := v.Args[2] 48703 if !(is32Bit(off1 + off2)) { 48704 break 48705 } 48706 v.reset(OpAMD64SETBstore) 48707 v.AuxInt = off1 + off2 48708 v.Aux = sym 48709 v.AddArg(base) 48710 v.AddArg(val) 48711 v.AddArg(mem) 48712 return true 48713 } 48714 // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 48715 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 48716 // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 48717 for { 48718 off1 := v.AuxInt 48719 sym1 := v.Aux 48720 _ = v.Args[2] 48721 v_0 := v.Args[0] 48722 if v_0.Op != OpAMD64LEAQ { 48723 break 48724 } 48725 off2 := v_0.AuxInt 48726 sym2 := v_0.Aux 48727 base := v_0.Args[0] 48728 val := v.Args[1] 48729 mem := v.Args[2] 48730 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 48731 break 48732 } 48733 v.reset(OpAMD64SETBstore) 48734 v.AuxInt = off1 + off2 48735 v.Aux = mergeSym(sym1, sym2) 48736 v.AddArg(base) 48737 v.AddArg(val) 48738 v.AddArg(mem) 48739 return true 48740 } 48741 // match: (SETBstore [off] {sym} ptr x:(FlagEQ) mem) 48742 // cond: 48743 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48744 for { 48745 off := v.AuxInt 48746 sym := v.Aux 48747 _ = v.Args[2] 48748 ptr := v.Args[0] 48749 x := v.Args[1] 48750 if x.Op != OpAMD64FlagEQ { 48751 break 48752 } 48753 mem := v.Args[2] 48754 v.reset(OpAMD64MOVBstore) 48755 v.AuxInt = off 48756 v.Aux = sym 48757 v.AddArg(ptr) 48758 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48759 v0.AuxInt = 0 48760 v.AddArg(v0) 48761 v.AddArg(mem) 48762 return true 48763 } 48764 // match: (SETBstore [off] {sym} ptr x:(FlagLT_ULT) mem) 48765 // cond: 48766 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48767 for { 48768 off := v.AuxInt 48769 sym := v.Aux 48770 _ = v.Args[2] 48771 ptr := v.Args[0] 48772 x := v.Args[1] 48773 if x.Op != OpAMD64FlagLT_ULT { 48774 break 48775 } 48776 mem := v.Args[2] 48777 v.reset(OpAMD64MOVBstore) 48778 v.AuxInt = off 48779 v.Aux = sym 48780 v.AddArg(ptr) 48781 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48782 v0.AuxInt = 1 48783 v.AddArg(v0) 48784 v.AddArg(mem) 48785 return true 48786 } 48787 // match: (SETBstore [off] {sym} ptr x:(FlagLT_UGT) mem) 48788 // cond: 48789 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48790 for { 48791 off := v.AuxInt 48792 sym := v.Aux 48793 _ = 
v.Args[2] 48794 ptr := v.Args[0] 48795 x := v.Args[1] 48796 if x.Op != OpAMD64FlagLT_UGT { 48797 break 48798 } 48799 mem := v.Args[2] 48800 v.reset(OpAMD64MOVBstore) 48801 v.AuxInt = off 48802 v.Aux = sym 48803 v.AddArg(ptr) 48804 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48805 v0.AuxInt = 0 48806 v.AddArg(v0) 48807 v.AddArg(mem) 48808 return true 48809 } 48810 // match: (SETBstore [off] {sym} ptr x:(FlagGT_ULT) mem) 48811 // cond: 48812 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 48813 for { 48814 off := v.AuxInt 48815 sym := v.Aux 48816 _ = v.Args[2] 48817 ptr := v.Args[0] 48818 x := v.Args[1] 48819 if x.Op != OpAMD64FlagGT_ULT { 48820 break 48821 } 48822 mem := v.Args[2] 48823 v.reset(OpAMD64MOVBstore) 48824 v.AuxInt = off 48825 v.Aux = sym 48826 v.AddArg(ptr) 48827 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48828 v0.AuxInt = 1 48829 v.AddArg(v0) 48830 v.AddArg(mem) 48831 return true 48832 } 48833 // match: (SETBstore [off] {sym} ptr x:(FlagGT_UGT) mem) 48834 // cond: 48835 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 48836 for { 48837 off := v.AuxInt 48838 sym := v.Aux 48839 _ = v.Args[2] 48840 ptr := v.Args[0] 48841 x := v.Args[1] 48842 if x.Op != OpAMD64FlagGT_UGT { 48843 break 48844 } 48845 mem := v.Args[2] 48846 v.reset(OpAMD64MOVBstore) 48847 v.AuxInt = off 48848 v.Aux = sym 48849 v.AddArg(ptr) 48850 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 48851 v0.AuxInt = 0 48852 v.AddArg(v0) 48853 v.AddArg(mem) 48854 return true 48855 } 48856 return false 48857 } 48858 func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { 48859 b := v.Block 48860 _ = b 48861 config := b.Func.Config 48862 _ = config 48863 // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) 48864 // cond: !config.nacl 48865 // result: (SETAE (BTL x y)) 48866 for { 48867 v_0 := v.Args[0] 48868 if v_0.Op != OpAMD64TESTL { 48869 break 48870 } 48871 _ = v_0.Args[1] 48872 v_0_0 := v_0.Args[0] 48873 if v_0_0.Op != OpAMD64SHLL { 48874 break 48875 } 48876 _ = v_0_0.Args[1] 48877 v_0_0_0 := v_0_0.Args[0] 48878 if v_0_0_0.Op != OpAMD64MOVLconst { 48879 break 48880 } 48881 if v_0_0_0.AuxInt != 1 { 48882 break 48883 } 48884 x := v_0_0.Args[1] 48885 y := v_0.Args[1] 48886 if !(!config.nacl) { 48887 break 48888 } 48889 v.reset(OpAMD64SETAE) 48890 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 48891 v0.AddArg(x) 48892 v0.AddArg(y) 48893 v.AddArg(v0) 48894 return true 48895 } 48896 // match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x))) 48897 // cond: !config.nacl 48898 // result: (SETAE (BTL x y)) 48899 for { 48900 v_0 := v.Args[0] 48901 if v_0.Op != OpAMD64TESTL { 48902 break 48903 } 48904 _ = v_0.Args[1] 48905 y := v_0.Args[0] 48906 v_0_1 := v_0.Args[1] 48907 if v_0_1.Op != OpAMD64SHLL { 48908 break 48909 } 48910 _ = v_0_1.Args[1] 48911 v_0_1_0 := v_0_1.Args[0] 48912 if v_0_1_0.Op != OpAMD64MOVLconst { 48913 break 48914 } 48915 if v_0_1_0.AuxInt != 1 { 48916 break 48917 } 48918 x := v_0_1.Args[1] 48919 if !(!config.nacl) { 48920 break 48921 } 48922 v.reset(OpAMD64SETAE) 48923 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 48924 v0.AddArg(x) 48925 v0.AddArg(y) 48926 v.AddArg(v0) 48927 return true 48928 } 48929 // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 48930 // cond: !config.nacl 48931 // result: (SETAE (BTQ x y)) 48932 for { 48933 v_0 := v.Args[0] 48934 if v_0.Op != OpAMD64TESTQ { 48935 break 48936 } 48937 _ = v_0.Args[1] 48938 v_0_0 := v_0.Args[0] 48939 if v_0_0.Op != OpAMD64SHLQ { 48940 break 48941 } 48942 _ = v_0_0.Args[1] 48943 v_0_0_0 := 
v_0_0.Args[0] 48944 if v_0_0_0.Op != OpAMD64MOVQconst { 48945 break 48946 } 48947 if v_0_0_0.AuxInt != 1 { 48948 break 48949 } 48950 x := v_0_0.Args[1] 48951 y := v_0.Args[1] 48952 if !(!config.nacl) { 48953 break 48954 } 48955 v.reset(OpAMD64SETAE) 48956 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 48957 v0.AddArg(x) 48958 v0.AddArg(y) 48959 v.AddArg(v0) 48960 return true 48961 } 48962 // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 48963 // cond: !config.nacl 48964 // result: (SETAE (BTQ x y)) 48965 for { 48966 v_0 := v.Args[0] 48967 if v_0.Op != OpAMD64TESTQ { 48968 break 48969 } 48970 _ = v_0.Args[1] 48971 y := v_0.Args[0] 48972 v_0_1 := v_0.Args[1] 48973 if v_0_1.Op != OpAMD64SHLQ { 48974 break 48975 } 48976 _ = v_0_1.Args[1] 48977 v_0_1_0 := v_0_1.Args[0] 48978 if v_0_1_0.Op != OpAMD64MOVQconst { 48979 break 48980 } 48981 if v_0_1_0.AuxInt != 1 { 48982 break 48983 } 48984 x := v_0_1.Args[1] 48985 if !(!config.nacl) { 48986 break 48987 } 48988 v.reset(OpAMD64SETAE) 48989 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 48990 v0.AddArg(x) 48991 v0.AddArg(y) 48992 v.AddArg(v0) 48993 return true 48994 } 48995 // match: (SETEQ (TESTLconst [c] x)) 48996 // cond: isUint32PowerOfTwo(c) && !config.nacl 48997 // result: (SETAE (BTLconst [log2uint32(c)] x)) 48998 for { 48999 v_0 := v.Args[0] 49000 if v_0.Op != OpAMD64TESTLconst { 49001 break 49002 } 49003 c := v_0.AuxInt 49004 x := v_0.Args[0] 49005 if !(isUint32PowerOfTwo(c) && !config.nacl) { 49006 break 49007 } 49008 v.reset(OpAMD64SETAE) 49009 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 49010 v0.AuxInt = log2uint32(c) 49011 v0.AddArg(x) 49012 v.AddArg(v0) 49013 return true 49014 } 49015 // match: (SETEQ (TESTQconst [c] x)) 49016 // cond: isUint64PowerOfTwo(c) && !config.nacl 49017 // result: (SETAE (BTQconst [log2(c)] x)) 49018 for { 49019 v_0 := v.Args[0] 49020 if v_0.Op != OpAMD64TESTQconst { 49021 break 49022 } 49023 c := v_0.AuxInt 49024 x := v_0.Args[0] 49025 if !(isUint64PowerOfTwo(c) && !config.nacl) { 49026 break 49027 } 49028 v.reset(OpAMD64SETAE) 49029 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49030 v0.AuxInt = log2(c) 49031 v0.AddArg(x) 49032 v.AddArg(v0) 49033 return true 49034 } 49035 // match: (SETEQ (TESTQ (MOVQconst [c]) x)) 49036 // cond: isUint64PowerOfTwo(c) && !config.nacl 49037 // result: (SETAE (BTQconst [log2(c)] x)) 49038 for { 49039 v_0 := v.Args[0] 49040 if v_0.Op != OpAMD64TESTQ { 49041 break 49042 } 49043 _ = v_0.Args[1] 49044 v_0_0 := v_0.Args[0] 49045 if v_0_0.Op != OpAMD64MOVQconst { 49046 break 49047 } 49048 c := v_0_0.AuxInt 49049 x := v_0.Args[1] 49050 if !(isUint64PowerOfTwo(c) && !config.nacl) { 49051 break 49052 } 49053 v.reset(OpAMD64SETAE) 49054 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49055 v0.AuxInt = log2(c) 49056 v0.AddArg(x) 49057 v.AddArg(v0) 49058 return true 49059 } 49060 // match: (SETEQ (TESTQ x (MOVQconst [c]))) 49061 // cond: isUint64PowerOfTwo(c) && !config.nacl 49062 // result: (SETAE (BTQconst [log2(c)] x)) 49063 for { 49064 v_0 := v.Args[0] 49065 if v_0.Op != OpAMD64TESTQ { 49066 break 49067 } 49068 _ = v_0.Args[1] 49069 x := v_0.Args[0] 49070 v_0_1 := v_0.Args[1] 49071 if v_0_1.Op != OpAMD64MOVQconst { 49072 break 49073 } 49074 c := v_0_1.AuxInt 49075 if !(isUint64PowerOfTwo(c) && !config.nacl) { 49076 break 49077 } 49078 v.reset(OpAMD64SETAE) 49079 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49080 v0.AuxInt = log2(c) 49081 v0.AddArg(x) 49082 v.AddArg(v0) 49083 return true 49084 } 49085 // match: (SETEQ 
(CMPLconst [1] s:(ANDLconst [1] _))) 49086 // cond: 49087 // result: (SETNE (CMPLconst [0] s)) 49088 for { 49089 v_0 := v.Args[0] 49090 if v_0.Op != OpAMD64CMPLconst { 49091 break 49092 } 49093 if v_0.AuxInt != 1 { 49094 break 49095 } 49096 s := v_0.Args[0] 49097 if s.Op != OpAMD64ANDLconst { 49098 break 49099 } 49100 if s.AuxInt != 1 { 49101 break 49102 } 49103 v.reset(OpAMD64SETNE) 49104 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 49105 v0.AuxInt = 0 49106 v0.AddArg(s) 49107 v.AddArg(v0) 49108 return true 49109 } 49110 // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _))) 49111 // cond: 49112 // result: (SETNE (CMPQconst [0] s)) 49113 for { 49114 v_0 := v.Args[0] 49115 if v_0.Op != OpAMD64CMPQconst { 49116 break 49117 } 49118 if v_0.AuxInt != 1 { 49119 break 49120 } 49121 s := v_0.Args[0] 49122 if s.Op != OpAMD64ANDQconst { 49123 break 49124 } 49125 if s.AuxInt != 1 { 49126 break 49127 } 49128 v.reset(OpAMD64SETNE) 49129 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 49130 v0.AuxInt = 0 49131 v0.AddArg(s) 49132 v.AddArg(v0) 49133 return true 49134 } 49135 return false 49136 } 49137 func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { 49138 b := v.Block 49139 _ = b 49140 config := b.Func.Config 49141 _ = config 49142 // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) 49143 // cond: z1==z2 && !config.nacl 49144 // result: (SETAE (BTQconst [63] x)) 49145 for { 49146 v_0 := v.Args[0] 49147 if v_0.Op != OpAMD64TESTQ { 49148 break 49149 } 49150 _ = v_0.Args[1] 49151 z1 := v_0.Args[0] 49152 if z1.Op != OpAMD64SHLQconst { 49153 break 49154 } 49155 if z1.AuxInt != 63 { 49156 break 49157 } 49158 z1_0 := z1.Args[0] 49159 if z1_0.Op != OpAMD64SHRQconst { 49160 break 49161 } 49162 if z1_0.AuxInt != 63 { 49163 break 49164 } 49165 x := z1_0.Args[0] 49166 z2 := v_0.Args[1] 49167 if !(z1 == z2 && !config.nacl) { 49168 break 49169 } 49170 v.reset(OpAMD64SETAE) 49171 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49172 v0.AuxInt = 63 49173 v0.AddArg(x) 49174 v.AddArg(v0) 49175 return true 49176 } 49177 // match: (SETEQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) 49178 // cond: z1==z2 && !config.nacl 49179 // result: (SETAE (BTQconst [63] x)) 49180 for { 49181 v_0 := v.Args[0] 49182 if v_0.Op != OpAMD64TESTQ { 49183 break 49184 } 49185 _ = v_0.Args[1] 49186 z2 := v_0.Args[0] 49187 z1 := v_0.Args[1] 49188 if z1.Op != OpAMD64SHLQconst { 49189 break 49190 } 49191 if z1.AuxInt != 63 { 49192 break 49193 } 49194 z1_0 := z1.Args[0] 49195 if z1_0.Op != OpAMD64SHRQconst { 49196 break 49197 } 49198 if z1_0.AuxInt != 63 { 49199 break 49200 } 49201 x := z1_0.Args[0] 49202 if !(z1 == z2 && !config.nacl) { 49203 break 49204 } 49205 v.reset(OpAMD64SETAE) 49206 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49207 v0.AuxInt = 63 49208 v0.AddArg(x) 49209 v.AddArg(v0) 49210 return true 49211 } 49212 // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) 49213 // cond: z1==z2 && !config.nacl 49214 // result: (SETAE (BTQconst [31] x)) 49215 for { 49216 v_0 := v.Args[0] 49217 if v_0.Op != OpAMD64TESTL { 49218 break 49219 } 49220 _ = v_0.Args[1] 49221 z1 := v_0.Args[0] 49222 if z1.Op != OpAMD64SHLLconst { 49223 break 49224 } 49225 if z1.AuxInt != 31 { 49226 break 49227 } 49228 z1_0 := z1.Args[0] 49229 if z1_0.Op != OpAMD64SHRQconst { 49230 break 49231 } 49232 if z1_0.AuxInt != 31 { 49233 break 49234 } 49235 x := z1_0.Args[0] 49236 z2 := v_0.Args[1] 49237 if !(z1 == z2 && !config.nacl) { 49238 break 49239 } 49240 v.reset(OpAMD64SETAE) 
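// The SETEQ-of-TEST rules turn single-bit tests into BT: TEST against a
// 1<<c mask sets ZF to the complement of the tested bit, while BT copies
// that bit into CF, so SETEQ(TEST ...) becomes SETAE(BT ...) (SETAE means
// CF==0). TESTQconst/TESTLconst with a power-of-two mask become
// BTQconst/BTLconst of log2 of the mask, and the paired SHL/SHR-by-63
// (or 31) patterns below isolate a sign or low bit the same way.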
49241 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49242 v0.AuxInt = 31 49243 v0.AddArg(x) 49244 v.AddArg(v0) 49245 return true 49246 } 49247 // match: (SETEQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) 49248 // cond: z1==z2 && !config.nacl 49249 // result: (SETAE (BTQconst [31] x)) 49250 for { 49251 v_0 := v.Args[0] 49252 if v_0.Op != OpAMD64TESTL { 49253 break 49254 } 49255 _ = v_0.Args[1] 49256 z2 := v_0.Args[0] 49257 z1 := v_0.Args[1] 49258 if z1.Op != OpAMD64SHLLconst { 49259 break 49260 } 49261 if z1.AuxInt != 31 { 49262 break 49263 } 49264 z1_0 := z1.Args[0] 49265 if z1_0.Op != OpAMD64SHRQconst { 49266 break 49267 } 49268 if z1_0.AuxInt != 31 { 49269 break 49270 } 49271 x := z1_0.Args[0] 49272 if !(z1 == z2 && !config.nacl) { 49273 break 49274 } 49275 v.reset(OpAMD64SETAE) 49276 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49277 v0.AuxInt = 31 49278 v0.AddArg(x) 49279 v.AddArg(v0) 49280 return true 49281 } 49282 // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) 49283 // cond: z1==z2 && !config.nacl 49284 // result: (SETAE (BTQconst [0] x)) 49285 for { 49286 v_0 := v.Args[0] 49287 if v_0.Op != OpAMD64TESTQ { 49288 break 49289 } 49290 _ = v_0.Args[1] 49291 z1 := v_0.Args[0] 49292 if z1.Op != OpAMD64SHRQconst { 49293 break 49294 } 49295 if z1.AuxInt != 63 { 49296 break 49297 } 49298 z1_0 := z1.Args[0] 49299 if z1_0.Op != OpAMD64SHLQconst { 49300 break 49301 } 49302 if z1_0.AuxInt != 63 { 49303 break 49304 } 49305 x := z1_0.Args[0] 49306 z2 := v_0.Args[1] 49307 if !(z1 == z2 && !config.nacl) { 49308 break 49309 } 49310 v.reset(OpAMD64SETAE) 49311 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49312 v0.AuxInt = 0 49313 v0.AddArg(x) 49314 v.AddArg(v0) 49315 return true 49316 } 49317 // match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) 49318 // cond: z1==z2 && !config.nacl 49319 // result: (SETAE (BTQconst [0] x)) 49320 for { 49321 v_0 := v.Args[0] 49322 if v_0.Op != OpAMD64TESTQ { 49323 break 49324 } 49325 _ = v_0.Args[1] 49326 z2 := v_0.Args[0] 49327 z1 := v_0.Args[1] 49328 if z1.Op != OpAMD64SHRQconst { 49329 break 49330 } 49331 if z1.AuxInt != 63 { 49332 break 49333 } 49334 z1_0 := z1.Args[0] 49335 if z1_0.Op != OpAMD64SHLQconst { 49336 break 49337 } 49338 if z1_0.AuxInt != 63 { 49339 break 49340 } 49341 x := z1_0.Args[0] 49342 if !(z1 == z2 && !config.nacl) { 49343 break 49344 } 49345 v.reset(OpAMD64SETAE) 49346 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49347 v0.AuxInt = 0 49348 v0.AddArg(x) 49349 v.AddArg(v0) 49350 return true 49351 } 49352 // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) 49353 // cond: z1==z2 && !config.nacl 49354 // result: (SETAE (BTLconst [0] x)) 49355 for { 49356 v_0 := v.Args[0] 49357 if v_0.Op != OpAMD64TESTL { 49358 break 49359 } 49360 _ = v_0.Args[1] 49361 z1 := v_0.Args[0] 49362 if z1.Op != OpAMD64SHRLconst { 49363 break 49364 } 49365 if z1.AuxInt != 31 { 49366 break 49367 } 49368 z1_0 := z1.Args[0] 49369 if z1_0.Op != OpAMD64SHLLconst { 49370 break 49371 } 49372 if z1_0.AuxInt != 31 { 49373 break 49374 } 49375 x := z1_0.Args[0] 49376 z2 := v_0.Args[1] 49377 if !(z1 == z2 && !config.nacl) { 49378 break 49379 } 49380 v.reset(OpAMD64SETAE) 49381 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 49382 v0.AuxInt = 0 49383 v0.AddArg(x) 49384 v.AddArg(v0) 49385 return true 49386 } 49387 // match: (SETEQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) 49388 // cond: z1==z2 && !config.nacl 49389 // result: (SETAE (BTLconst [0] x)) 49390 
for { 49391 v_0 := v.Args[0] 49392 if v_0.Op != OpAMD64TESTL { 49393 break 49394 } 49395 _ = v_0.Args[1] 49396 z2 := v_0.Args[0] 49397 z1 := v_0.Args[1] 49398 if z1.Op != OpAMD64SHRLconst { 49399 break 49400 } 49401 if z1.AuxInt != 31 { 49402 break 49403 } 49404 z1_0 := z1.Args[0] 49405 if z1_0.Op != OpAMD64SHLLconst { 49406 break 49407 } 49408 if z1_0.AuxInt != 31 { 49409 break 49410 } 49411 x := z1_0.Args[0] 49412 if !(z1 == z2 && !config.nacl) { 49413 break 49414 } 49415 v.reset(OpAMD64SETAE) 49416 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 49417 v0.AuxInt = 0 49418 v0.AddArg(x) 49419 v.AddArg(v0) 49420 return true 49421 } 49422 // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2)) 49423 // cond: z1==z2 && !config.nacl 49424 // result: (SETAE (BTQconst [63] x)) 49425 for { 49426 v_0 := v.Args[0] 49427 if v_0.Op != OpAMD64TESTQ { 49428 break 49429 } 49430 _ = v_0.Args[1] 49431 z1 := v_0.Args[0] 49432 if z1.Op != OpAMD64SHRQconst { 49433 break 49434 } 49435 if z1.AuxInt != 63 { 49436 break 49437 } 49438 x := z1.Args[0] 49439 z2 := v_0.Args[1] 49440 if !(z1 == z2 && !config.nacl) { 49441 break 49442 } 49443 v.reset(OpAMD64SETAE) 49444 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49445 v0.AuxInt = 63 49446 v0.AddArg(x) 49447 v.AddArg(v0) 49448 return true 49449 } 49450 // match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] x))) 49451 // cond: z1==z2 && !config.nacl 49452 // result: (SETAE (BTQconst [63] x)) 49453 for { 49454 v_0 := v.Args[0] 49455 if v_0.Op != OpAMD64TESTQ { 49456 break 49457 } 49458 _ = v_0.Args[1] 49459 z2 := v_0.Args[0] 49460 z1 := v_0.Args[1] 49461 if z1.Op != OpAMD64SHRQconst { 49462 break 49463 } 49464 if z1.AuxInt != 63 { 49465 break 49466 } 49467 x := z1.Args[0] 49468 if !(z1 == z2 && !config.nacl) { 49469 break 49470 } 49471 v.reset(OpAMD64SETAE) 49472 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49473 v0.AuxInt = 63 49474 v0.AddArg(x) 49475 v.AddArg(v0) 49476 return true 49477 } 49478 return false 49479 } 49480 func rewriteValueAMD64_OpAMD64SETEQ_20(v *Value) bool { 49481 b := v.Block 49482 _ = b 49483 config := b.Func.Config 49484 _ = config 49485 // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2)) 49486 // cond: z1==z2 && !config.nacl 49487 // result: (SETAE (BTLconst [31] x)) 49488 for { 49489 v_0 := v.Args[0] 49490 if v_0.Op != OpAMD64TESTL { 49491 break 49492 } 49493 _ = v_0.Args[1] 49494 z1 := v_0.Args[0] 49495 if z1.Op != OpAMD64SHRLconst { 49496 break 49497 } 49498 if z1.AuxInt != 31 { 49499 break 49500 } 49501 x := z1.Args[0] 49502 z2 := v_0.Args[1] 49503 if !(z1 == z2 && !config.nacl) { 49504 break 49505 } 49506 v.reset(OpAMD64SETAE) 49507 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 49508 v0.AuxInt = 31 49509 v0.AddArg(x) 49510 v.AddArg(v0) 49511 return true 49512 } 49513 // match: (SETEQ (TESTL z2 z1:(SHRLconst [31] x))) 49514 // cond: z1==z2 && !config.nacl 49515 // result: (SETAE (BTLconst [31] x)) 49516 for { 49517 v_0 := v.Args[0] 49518 if v_0.Op != OpAMD64TESTL { 49519 break 49520 } 49521 _ = v_0.Args[1] 49522 z2 := v_0.Args[0] 49523 z1 := v_0.Args[1] 49524 if z1.Op != OpAMD64SHRLconst { 49525 break 49526 } 49527 if z1.AuxInt != 31 { 49528 break 49529 } 49530 x := z1.Args[0] 49531 if !(z1 == z2 && !config.nacl) { 49532 break 49533 } 49534 v.reset(OpAMD64SETAE) 49535 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 49536 v0.AuxInt = 31 49537 v0.AddArg(x) 49538 v.AddArg(v0) 49539 return true 49540 } 49541 // match: (SETEQ (InvertFlags x)) 49542 // cond: 49543 // result: (SETEQ x) 49544 for { 
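// Equality does not depend on operand order, so SETEQ(InvertFlags x)
// reduces to SETEQ x; for a constant flags input only FlagEQ yields 1.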
49545 v_0 := v.Args[0] 49546 if v_0.Op != OpAMD64InvertFlags { 49547 break 49548 } 49549 x := v_0.Args[0] 49550 v.reset(OpAMD64SETEQ) 49551 v.AddArg(x) 49552 return true 49553 } 49554 // match: (SETEQ (FlagEQ)) 49555 // cond: 49556 // result: (MOVLconst [1]) 49557 for { 49558 v_0 := v.Args[0] 49559 if v_0.Op != OpAMD64FlagEQ { 49560 break 49561 } 49562 v.reset(OpAMD64MOVLconst) 49563 v.AuxInt = 1 49564 return true 49565 } 49566 // match: (SETEQ (FlagLT_ULT)) 49567 // cond: 49568 // result: (MOVLconst [0]) 49569 for { 49570 v_0 := v.Args[0] 49571 if v_0.Op != OpAMD64FlagLT_ULT { 49572 break 49573 } 49574 v.reset(OpAMD64MOVLconst) 49575 v.AuxInt = 0 49576 return true 49577 } 49578 // match: (SETEQ (FlagLT_UGT)) 49579 // cond: 49580 // result: (MOVLconst [0]) 49581 for { 49582 v_0 := v.Args[0] 49583 if v_0.Op != OpAMD64FlagLT_UGT { 49584 break 49585 } 49586 v.reset(OpAMD64MOVLconst) 49587 v.AuxInt = 0 49588 return true 49589 } 49590 // match: (SETEQ (FlagGT_ULT)) 49591 // cond: 49592 // result: (MOVLconst [0]) 49593 for { 49594 v_0 := v.Args[0] 49595 if v_0.Op != OpAMD64FlagGT_ULT { 49596 break 49597 } 49598 v.reset(OpAMD64MOVLconst) 49599 v.AuxInt = 0 49600 return true 49601 } 49602 // match: (SETEQ (FlagGT_UGT)) 49603 // cond: 49604 // result: (MOVLconst [0]) 49605 for { 49606 v_0 := v.Args[0] 49607 if v_0.Op != OpAMD64FlagGT_UGT { 49608 break 49609 } 49610 v.reset(OpAMD64MOVLconst) 49611 v.AuxInt = 0 49612 return true 49613 } 49614 return false 49615 } 49616 func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool { 49617 b := v.Block 49618 _ = b 49619 config := b.Func.Config 49620 _ = config 49621 // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) 49622 // cond: !config.nacl 49623 // result: (SETAEstore [off] {sym} ptr (BTL x y) mem) 49624 for { 49625 off := v.AuxInt 49626 sym := v.Aux 49627 _ = v.Args[2] 49628 ptr := v.Args[0] 49629 v_1 := v.Args[1] 49630 if v_1.Op != OpAMD64TESTL { 49631 break 49632 } 49633 _ = v_1.Args[1] 49634 v_1_0 := v_1.Args[0] 49635 if v_1_0.Op != OpAMD64SHLL { 49636 break 49637 } 49638 _ = v_1_0.Args[1] 49639 v_1_0_0 := v_1_0.Args[0] 49640 if v_1_0_0.Op != OpAMD64MOVLconst { 49641 break 49642 } 49643 if v_1_0_0.AuxInt != 1 { 49644 break 49645 } 49646 x := v_1_0.Args[1] 49647 y := v_1.Args[1] 49648 mem := v.Args[2] 49649 if !(!config.nacl) { 49650 break 49651 } 49652 v.reset(OpAMD64SETAEstore) 49653 v.AuxInt = off 49654 v.Aux = sym 49655 v.AddArg(ptr) 49656 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 49657 v0.AddArg(x) 49658 v0.AddArg(y) 49659 v.AddArg(v0) 49660 v.AddArg(mem) 49661 return true 49662 } 49663 // match: (SETEQstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) 49664 // cond: !config.nacl 49665 // result: (SETAEstore [off] {sym} ptr (BTL x y) mem) 49666 for { 49667 off := v.AuxInt 49668 sym := v.Aux 49669 _ = v.Args[2] 49670 ptr := v.Args[0] 49671 v_1 := v.Args[1] 49672 if v_1.Op != OpAMD64TESTL { 49673 break 49674 } 49675 _ = v_1.Args[1] 49676 y := v_1.Args[0] 49677 v_1_1 := v_1.Args[1] 49678 if v_1_1.Op != OpAMD64SHLL { 49679 break 49680 } 49681 _ = v_1_1.Args[1] 49682 v_1_1_0 := v_1_1.Args[0] 49683 if v_1_1_0.Op != OpAMD64MOVLconst { 49684 break 49685 } 49686 if v_1_1_0.AuxInt != 1 { 49687 break 49688 } 49689 x := v_1_1.Args[1] 49690 mem := v.Args[2] 49691 if !(!config.nacl) { 49692 break 49693 } 49694 v.reset(OpAMD64SETAEstore) 49695 v.AuxInt = off 49696 v.Aux = sym 49697 v.AddArg(ptr) 49698 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 49699 v0.AddArg(x) 49700 v0.AddArg(y) 49701 v.AddArg(v0) 
49702 v.AddArg(mem) 49703 return true 49704 } 49705 // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) 49706 // cond: !config.nacl 49707 // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem) 49708 for { 49709 off := v.AuxInt 49710 sym := v.Aux 49711 _ = v.Args[2] 49712 ptr := v.Args[0] 49713 v_1 := v.Args[1] 49714 if v_1.Op != OpAMD64TESTQ { 49715 break 49716 } 49717 _ = v_1.Args[1] 49718 v_1_0 := v_1.Args[0] 49719 if v_1_0.Op != OpAMD64SHLQ { 49720 break 49721 } 49722 _ = v_1_0.Args[1] 49723 v_1_0_0 := v_1_0.Args[0] 49724 if v_1_0_0.Op != OpAMD64MOVQconst { 49725 break 49726 } 49727 if v_1_0_0.AuxInt != 1 { 49728 break 49729 } 49730 x := v_1_0.Args[1] 49731 y := v_1.Args[1] 49732 mem := v.Args[2] 49733 if !(!config.nacl) { 49734 break 49735 } 49736 v.reset(OpAMD64SETAEstore) 49737 v.AuxInt = off 49738 v.Aux = sym 49739 v.AddArg(ptr) 49740 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 49741 v0.AddArg(x) 49742 v0.AddArg(y) 49743 v.AddArg(v0) 49744 v.AddArg(mem) 49745 return true 49746 } 49747 // match: (SETEQstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) 49748 // cond: !config.nacl 49749 // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem) 49750 for { 49751 off := v.AuxInt 49752 sym := v.Aux 49753 _ = v.Args[2] 49754 ptr := v.Args[0] 49755 v_1 := v.Args[1] 49756 if v_1.Op != OpAMD64TESTQ { 49757 break 49758 } 49759 _ = v_1.Args[1] 49760 y := v_1.Args[0] 49761 v_1_1 := v_1.Args[1] 49762 if v_1_1.Op != OpAMD64SHLQ { 49763 break 49764 } 49765 _ = v_1_1.Args[1] 49766 v_1_1_0 := v_1_1.Args[0] 49767 if v_1_1_0.Op != OpAMD64MOVQconst { 49768 break 49769 } 49770 if v_1_1_0.AuxInt != 1 { 49771 break 49772 } 49773 x := v_1_1.Args[1] 49774 mem := v.Args[2] 49775 if !(!config.nacl) { 49776 break 49777 } 49778 v.reset(OpAMD64SETAEstore) 49779 v.AuxInt = off 49780 v.Aux = sym 49781 v.AddArg(ptr) 49782 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 49783 v0.AddArg(x) 49784 v0.AddArg(y) 49785 v.AddArg(v0) 49786 v.AddArg(mem) 49787 return true 49788 } 49789 // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem) 49790 // cond: isUint32PowerOfTwo(c) && !config.nacl 49791 // result: (SETAEstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem) 49792 for { 49793 off := v.AuxInt 49794 sym := v.Aux 49795 _ = v.Args[2] 49796 ptr := v.Args[0] 49797 v_1 := v.Args[1] 49798 if v_1.Op != OpAMD64TESTLconst { 49799 break 49800 } 49801 c := v_1.AuxInt 49802 x := v_1.Args[0] 49803 mem := v.Args[2] 49804 if !(isUint32PowerOfTwo(c) && !config.nacl) { 49805 break 49806 } 49807 v.reset(OpAMD64SETAEstore) 49808 v.AuxInt = off 49809 v.Aux = sym 49810 v.AddArg(ptr) 49811 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 49812 v0.AuxInt = log2uint32(c) 49813 v0.AddArg(x) 49814 v.AddArg(v0) 49815 v.AddArg(mem) 49816 return true 49817 } 49818 // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem) 49819 // cond: isUint64PowerOfTwo(c) && !config.nacl 49820 // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) 49821 for { 49822 off := v.AuxInt 49823 sym := v.Aux 49824 _ = v.Args[2] 49825 ptr := v.Args[0] 49826 v_1 := v.Args[1] 49827 if v_1.Op != OpAMD64TESTQconst { 49828 break 49829 } 49830 c := v_1.AuxInt 49831 x := v_1.Args[0] 49832 mem := v.Args[2] 49833 if !(isUint64PowerOfTwo(c) && !config.nacl) { 49834 break 49835 } 49836 v.reset(OpAMD64SETAEstore) 49837 v.AuxInt = off 49838 v.Aux = sym 49839 v.AddArg(ptr) 49840 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49841 v0.AuxInt = log2(c) 49842 v0.AddArg(x) 49843 v.AddArg(v0) 
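// The SETEQstore rules mirror the SETEQ rules above but keep the result
// fused with its byte store: the flag computation is rewritten to a BT
// form and the op becomes SETAEstore (or SETNEstore for the low-bit
// CMP-against-1 idiom) writing the same byte.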
49844 v.AddArg(mem) 49845 return true 49846 } 49847 // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) 49848 // cond: isUint64PowerOfTwo(c) && !config.nacl 49849 // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) 49850 for { 49851 off := v.AuxInt 49852 sym := v.Aux 49853 _ = v.Args[2] 49854 ptr := v.Args[0] 49855 v_1 := v.Args[1] 49856 if v_1.Op != OpAMD64TESTQ { 49857 break 49858 } 49859 _ = v_1.Args[1] 49860 v_1_0 := v_1.Args[0] 49861 if v_1_0.Op != OpAMD64MOVQconst { 49862 break 49863 } 49864 c := v_1_0.AuxInt 49865 x := v_1.Args[1] 49866 mem := v.Args[2] 49867 if !(isUint64PowerOfTwo(c) && !config.nacl) { 49868 break 49869 } 49870 v.reset(OpAMD64SETAEstore) 49871 v.AuxInt = off 49872 v.Aux = sym 49873 v.AddArg(ptr) 49874 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49875 v0.AuxInt = log2(c) 49876 v0.AddArg(x) 49877 v.AddArg(v0) 49878 v.AddArg(mem) 49879 return true 49880 } 49881 // match: (SETEQstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) 49882 // cond: isUint64PowerOfTwo(c) && !config.nacl 49883 // result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) 49884 for { 49885 off := v.AuxInt 49886 sym := v.Aux 49887 _ = v.Args[2] 49888 ptr := v.Args[0] 49889 v_1 := v.Args[1] 49890 if v_1.Op != OpAMD64TESTQ { 49891 break 49892 } 49893 _ = v_1.Args[1] 49894 x := v_1.Args[0] 49895 v_1_1 := v_1.Args[1] 49896 if v_1_1.Op != OpAMD64MOVQconst { 49897 break 49898 } 49899 c := v_1_1.AuxInt 49900 mem := v.Args[2] 49901 if !(isUint64PowerOfTwo(c) && !config.nacl) { 49902 break 49903 } 49904 v.reset(OpAMD64SETAEstore) 49905 v.AuxInt = off 49906 v.Aux = sym 49907 v.AddArg(ptr) 49908 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49909 v0.AuxInt = log2(c) 49910 v0.AddArg(x) 49911 v.AddArg(v0) 49912 v.AddArg(mem) 49913 return true 49914 } 49915 // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) 49916 // cond: 49917 // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem) 49918 for { 49919 off := v.AuxInt 49920 sym := v.Aux 49921 _ = v.Args[2] 49922 ptr := v.Args[0] 49923 v_1 := v.Args[1] 49924 if v_1.Op != OpAMD64CMPLconst { 49925 break 49926 } 49927 if v_1.AuxInt != 1 { 49928 break 49929 } 49930 s := v_1.Args[0] 49931 if s.Op != OpAMD64ANDLconst { 49932 break 49933 } 49934 if s.AuxInt != 1 { 49935 break 49936 } 49937 mem := v.Args[2] 49938 v.reset(OpAMD64SETNEstore) 49939 v.AuxInt = off 49940 v.Aux = sym 49941 v.AddArg(ptr) 49942 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 49943 v0.AuxInt = 0 49944 v0.AddArg(s) 49945 v.AddArg(v0) 49946 v.AddArg(mem) 49947 return true 49948 } 49949 // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) 49950 // cond: 49951 // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem) 49952 for { 49953 off := v.AuxInt 49954 sym := v.Aux 49955 _ = v.Args[2] 49956 ptr := v.Args[0] 49957 v_1 := v.Args[1] 49958 if v_1.Op != OpAMD64CMPQconst { 49959 break 49960 } 49961 if v_1.AuxInt != 1 { 49962 break 49963 } 49964 s := v_1.Args[0] 49965 if s.Op != OpAMD64ANDQconst { 49966 break 49967 } 49968 if s.AuxInt != 1 { 49969 break 49970 } 49971 mem := v.Args[2] 49972 v.reset(OpAMD64SETNEstore) 49973 v.AuxInt = off 49974 v.Aux = sym 49975 v.AddArg(ptr) 49976 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 49977 v0.AuxInt = 0 49978 v0.AddArg(s) 49979 v.AddArg(v0) 49980 v.AddArg(mem) 49981 return true 49982 } 49983 return false 49984 } 49985 func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool { 49986 b := v.Block 49987 _ = 
b 49988 config := b.Func.Config 49989 _ = config 49990 // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) 49991 // cond: z1==z2 && !config.nacl 49992 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) 49993 for { 49994 off := v.AuxInt 49995 sym := v.Aux 49996 _ = v.Args[2] 49997 ptr := v.Args[0] 49998 v_1 := v.Args[1] 49999 if v_1.Op != OpAMD64TESTQ { 50000 break 50001 } 50002 _ = v_1.Args[1] 50003 z1 := v_1.Args[0] 50004 if z1.Op != OpAMD64SHLQconst { 50005 break 50006 } 50007 if z1.AuxInt != 63 { 50008 break 50009 } 50010 z1_0 := z1.Args[0] 50011 if z1_0.Op != OpAMD64SHRQconst { 50012 break 50013 } 50014 if z1_0.AuxInt != 63 { 50015 break 50016 } 50017 x := z1_0.Args[0] 50018 z2 := v_1.Args[1] 50019 mem := v.Args[2] 50020 if !(z1 == z2 && !config.nacl) { 50021 break 50022 } 50023 v.reset(OpAMD64SETAEstore) 50024 v.AuxInt = off 50025 v.Aux = sym 50026 v.AddArg(ptr) 50027 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 50028 v0.AuxInt = 63 50029 v0.AddArg(x) 50030 v.AddArg(v0) 50031 v.AddArg(mem) 50032 return true 50033 } 50034 // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem) 50035 // cond: z1==z2 && !config.nacl 50036 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) 50037 for { 50038 off := v.AuxInt 50039 sym := v.Aux 50040 _ = v.Args[2] 50041 ptr := v.Args[0] 50042 v_1 := v.Args[1] 50043 if v_1.Op != OpAMD64TESTQ { 50044 break 50045 } 50046 _ = v_1.Args[1] 50047 z2 := v_1.Args[0] 50048 z1 := v_1.Args[1] 50049 if z1.Op != OpAMD64SHLQconst { 50050 break 50051 } 50052 if z1.AuxInt != 63 { 50053 break 50054 } 50055 z1_0 := z1.Args[0] 50056 if z1_0.Op != OpAMD64SHRQconst { 50057 break 50058 } 50059 if z1_0.AuxInt != 63 { 50060 break 50061 } 50062 x := z1_0.Args[0] 50063 mem := v.Args[2] 50064 if !(z1 == z2 && !config.nacl) { 50065 break 50066 } 50067 v.reset(OpAMD64SETAEstore) 50068 v.AuxInt = off 50069 v.Aux = sym 50070 v.AddArg(ptr) 50071 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 50072 v0.AuxInt = 63 50073 v0.AddArg(x) 50074 v.AddArg(v0) 50075 v.AddArg(mem) 50076 return true 50077 } 50078 // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) 50079 // cond: z1==z2 && !config.nacl 50080 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) 50081 for { 50082 off := v.AuxInt 50083 sym := v.Aux 50084 _ = v.Args[2] 50085 ptr := v.Args[0] 50086 v_1 := v.Args[1] 50087 if v_1.Op != OpAMD64TESTL { 50088 break 50089 } 50090 _ = v_1.Args[1] 50091 z1 := v_1.Args[0] 50092 if z1.Op != OpAMD64SHLLconst { 50093 break 50094 } 50095 if z1.AuxInt != 31 { 50096 break 50097 } 50098 z1_0 := z1.Args[0] 50099 if z1_0.Op != OpAMD64SHRLconst { 50100 break 50101 } 50102 if z1_0.AuxInt != 31 { 50103 break 50104 } 50105 x := z1_0.Args[0] 50106 z2 := v_1.Args[1] 50107 mem := v.Args[2] 50108 if !(z1 == z2 && !config.nacl) { 50109 break 50110 } 50111 v.reset(OpAMD64SETAEstore) 50112 v.AuxInt = off 50113 v.Aux = sym 50114 v.AddArg(ptr) 50115 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 50116 v0.AuxInt = 31 50117 v0.AddArg(x) 50118 v.AddArg(v0) 50119 v.AddArg(mem) 50120 return true 50121 } 50122 // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem) 50123 // cond: z1==z2 && !config.nacl 50124 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) 50125 for { 50126 off := v.AuxInt 50127 sym := v.Aux 50128 _ = v.Args[2] 50129 ptr := v.Args[0] 50130 v_1 := v.Args[1] 50131 if v_1.Op != 
OpAMD64TESTL { 50132 break 50133 } 50134 _ = v_1.Args[1] 50135 z2 := v_1.Args[0] 50136 z1 := v_1.Args[1] 50137 if z1.Op != OpAMD64SHLLconst { 50138 break 50139 } 50140 if z1.AuxInt != 31 { 50141 break 50142 } 50143 z1_0 := z1.Args[0] 50144 if z1_0.Op != OpAMD64SHRLconst { 50145 break 50146 } 50147 if z1_0.AuxInt != 31 { 50148 break 50149 } 50150 x := z1_0.Args[0] 50151 mem := v.Args[2] 50152 if !(z1 == z2 && !config.nacl) { 50153 break 50154 } 50155 v.reset(OpAMD64SETAEstore) 50156 v.AuxInt = off 50157 v.Aux = sym 50158 v.AddArg(ptr) 50159 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 50160 v0.AuxInt = 31 50161 v0.AddArg(x) 50162 v.AddArg(v0) 50163 v.AddArg(mem) 50164 return true 50165 } 50166 // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) 50167 // cond: z1==z2 && !config.nacl 50168 // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem) 50169 for { 50170 off := v.AuxInt 50171 sym := v.Aux 50172 _ = v.Args[2] 50173 ptr := v.Args[0] 50174 v_1 := v.Args[1] 50175 if v_1.Op != OpAMD64TESTQ { 50176 break 50177 } 50178 _ = v_1.Args[1] 50179 z1 := v_1.Args[0] 50180 if z1.Op != OpAMD64SHRQconst { 50181 break 50182 } 50183 if z1.AuxInt != 63 { 50184 break 50185 } 50186 z1_0 := z1.Args[0] 50187 if z1_0.Op != OpAMD64SHLQconst { 50188 break 50189 } 50190 if z1_0.AuxInt != 63 { 50191 break 50192 } 50193 x := z1_0.Args[0] 50194 z2 := v_1.Args[1] 50195 mem := v.Args[2] 50196 if !(z1 == z2 && !config.nacl) { 50197 break 50198 } 50199 v.reset(OpAMD64SETAEstore) 50200 v.AuxInt = off 50201 v.Aux = sym 50202 v.AddArg(ptr) 50203 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 50204 v0.AuxInt = 0 50205 v0.AddArg(x) 50206 v.AddArg(v0) 50207 v.AddArg(mem) 50208 return true 50209 } 50210 // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem) 50211 // cond: z1==z2 && !config.nacl 50212 // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem) 50213 for { 50214 off := v.AuxInt 50215 sym := v.Aux 50216 _ = v.Args[2] 50217 ptr := v.Args[0] 50218 v_1 := v.Args[1] 50219 if v_1.Op != OpAMD64TESTQ { 50220 break 50221 } 50222 _ = v_1.Args[1] 50223 z2 := v_1.Args[0] 50224 z1 := v_1.Args[1] 50225 if z1.Op != OpAMD64SHRQconst { 50226 break 50227 } 50228 if z1.AuxInt != 63 { 50229 break 50230 } 50231 z1_0 := z1.Args[0] 50232 if z1_0.Op != OpAMD64SHLQconst { 50233 break 50234 } 50235 if z1_0.AuxInt != 63 { 50236 break 50237 } 50238 x := z1_0.Args[0] 50239 mem := v.Args[2] 50240 if !(z1 == z2 && !config.nacl) { 50241 break 50242 } 50243 v.reset(OpAMD64SETAEstore) 50244 v.AuxInt = off 50245 v.Aux = sym 50246 v.AddArg(ptr) 50247 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 50248 v0.AuxInt = 0 50249 v0.AddArg(x) 50250 v.AddArg(v0) 50251 v.AddArg(mem) 50252 return true 50253 } 50254 // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) 50255 // cond: z1==z2 && !config.nacl 50256 // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem) 50257 for { 50258 off := v.AuxInt 50259 sym := v.Aux 50260 _ = v.Args[2] 50261 ptr := v.Args[0] 50262 v_1 := v.Args[1] 50263 if v_1.Op != OpAMD64TESTL { 50264 break 50265 } 50266 _ = v_1.Args[1] 50267 z1 := v_1.Args[0] 50268 if z1.Op != OpAMD64SHRLconst { 50269 break 50270 } 50271 if z1.AuxInt != 31 { 50272 break 50273 } 50274 z1_0 := z1.Args[0] 50275 if z1_0.Op != OpAMD64SHLLconst { 50276 break 50277 } 50278 if z1_0.AuxInt != 31 { 50279 break 50280 } 50281 x := z1_0.Args[0] 50282 z2 := v_1.Args[1] 50283 mem := v.Args[2] 50284 if 
!(z1 == z2 && !config.nacl) { 50285 break 50286 } 50287 v.reset(OpAMD64SETAEstore) 50288 v.AuxInt = off 50289 v.Aux = sym 50290 v.AddArg(ptr) 50291 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 50292 v0.AuxInt = 0 50293 v0.AddArg(x) 50294 v.AddArg(v0) 50295 v.AddArg(mem) 50296 return true 50297 } 50298 // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem) 50299 // cond: z1==z2 && !config.nacl 50300 // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem) 50301 for { 50302 off := v.AuxInt 50303 sym := v.Aux 50304 _ = v.Args[2] 50305 ptr := v.Args[0] 50306 v_1 := v.Args[1] 50307 if v_1.Op != OpAMD64TESTL { 50308 break 50309 } 50310 _ = v_1.Args[1] 50311 z2 := v_1.Args[0] 50312 z1 := v_1.Args[1] 50313 if z1.Op != OpAMD64SHRLconst { 50314 break 50315 } 50316 if z1.AuxInt != 31 { 50317 break 50318 } 50319 z1_0 := z1.Args[0] 50320 if z1_0.Op != OpAMD64SHLLconst { 50321 break 50322 } 50323 if z1_0.AuxInt != 31 { 50324 break 50325 } 50326 x := z1_0.Args[0] 50327 mem := v.Args[2] 50328 if !(z1 == z2 && !config.nacl) { 50329 break 50330 } 50331 v.reset(OpAMD64SETAEstore) 50332 v.AuxInt = off 50333 v.Aux = sym 50334 v.AddArg(ptr) 50335 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 50336 v0.AuxInt = 0 50337 v0.AddArg(x) 50338 v.AddArg(v0) 50339 v.AddArg(mem) 50340 return true 50341 } 50342 // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) 50343 // cond: z1==z2 && !config.nacl 50344 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) 50345 for { 50346 off := v.AuxInt 50347 sym := v.Aux 50348 _ = v.Args[2] 50349 ptr := v.Args[0] 50350 v_1 := v.Args[1] 50351 if v_1.Op != OpAMD64TESTQ { 50352 break 50353 } 50354 _ = v_1.Args[1] 50355 z1 := v_1.Args[0] 50356 if z1.Op != OpAMD64SHRQconst { 50357 break 50358 } 50359 if z1.AuxInt != 63 { 50360 break 50361 } 50362 x := z1.Args[0] 50363 z2 := v_1.Args[1] 50364 mem := v.Args[2] 50365 if !(z1 == z2 && !config.nacl) { 50366 break 50367 } 50368 v.reset(OpAMD64SETAEstore) 50369 v.AuxInt = off 50370 v.Aux = sym 50371 v.AddArg(ptr) 50372 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 50373 v0.AuxInt = 63 50374 v0.AddArg(x) 50375 v.AddArg(v0) 50376 v.AddArg(mem) 50377 return true 50378 } 50379 // match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem) 50380 // cond: z1==z2 && !config.nacl 50381 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) 50382 for { 50383 off := v.AuxInt 50384 sym := v.Aux 50385 _ = v.Args[2] 50386 ptr := v.Args[0] 50387 v_1 := v.Args[1] 50388 if v_1.Op != OpAMD64TESTQ { 50389 break 50390 } 50391 _ = v_1.Args[1] 50392 z2 := v_1.Args[0] 50393 z1 := v_1.Args[1] 50394 if z1.Op != OpAMD64SHRQconst { 50395 break 50396 } 50397 if z1.AuxInt != 63 { 50398 break 50399 } 50400 x := z1.Args[0] 50401 mem := v.Args[2] 50402 if !(z1 == z2 && !config.nacl) { 50403 break 50404 } 50405 v.reset(OpAMD64SETAEstore) 50406 v.AuxInt = off 50407 v.Aux = sym 50408 v.AddArg(ptr) 50409 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 50410 v0.AuxInt = 63 50411 v0.AddArg(x) 50412 v.AddArg(v0) 50413 v.AddArg(mem) 50414 return true 50415 } 50416 return false 50417 } 50418 func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool { 50419 b := v.Block 50420 _ = b 50421 config := b.Func.Config 50422 _ = config 50423 // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) 50424 // cond: z1==z2 && !config.nacl 50425 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) 50426 for { 50427 off := 
v.AuxInt 50428 sym := v.Aux 50429 _ = v.Args[2] 50430 ptr := v.Args[0] 50431 v_1 := v.Args[1] 50432 if v_1.Op != OpAMD64TESTL { 50433 break 50434 } 50435 _ = v_1.Args[1] 50436 z1 := v_1.Args[0] 50437 if z1.Op != OpAMD64SHRLconst { 50438 break 50439 } 50440 if z1.AuxInt != 31 { 50441 break 50442 } 50443 x := z1.Args[0] 50444 z2 := v_1.Args[1] 50445 mem := v.Args[2] 50446 if !(z1 == z2 && !config.nacl) { 50447 break 50448 } 50449 v.reset(OpAMD64SETAEstore) 50450 v.AuxInt = off 50451 v.Aux = sym 50452 v.AddArg(ptr) 50453 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 50454 v0.AuxInt = 31 50455 v0.AddArg(x) 50456 v.AddArg(v0) 50457 v.AddArg(mem) 50458 return true 50459 } 50460 // match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem) 50461 // cond: z1==z2 && !config.nacl 50462 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) 50463 for { 50464 off := v.AuxInt 50465 sym := v.Aux 50466 _ = v.Args[2] 50467 ptr := v.Args[0] 50468 v_1 := v.Args[1] 50469 if v_1.Op != OpAMD64TESTL { 50470 break 50471 } 50472 _ = v_1.Args[1] 50473 z2 := v_1.Args[0] 50474 z1 := v_1.Args[1] 50475 if z1.Op != OpAMD64SHRLconst { 50476 break 50477 } 50478 if z1.AuxInt != 31 { 50479 break 50480 } 50481 x := z1.Args[0] 50482 mem := v.Args[2] 50483 if !(z1 == z2 && !config.nacl) { 50484 break 50485 } 50486 v.reset(OpAMD64SETAEstore) 50487 v.AuxInt = off 50488 v.Aux = sym 50489 v.AddArg(ptr) 50490 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 50491 v0.AuxInt = 31 50492 v0.AddArg(x) 50493 v.AddArg(v0) 50494 v.AddArg(mem) 50495 return true 50496 } 50497 // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem) 50498 // cond: 50499 // result: (SETEQstore [off] {sym} ptr x mem) 50500 for { 50501 off := v.AuxInt 50502 sym := v.Aux 50503 _ = v.Args[2] 50504 ptr := v.Args[0] 50505 v_1 := v.Args[1] 50506 if v_1.Op != OpAMD64InvertFlags { 50507 break 50508 } 50509 x := v_1.Args[0] 50510 mem := v.Args[2] 50511 v.reset(OpAMD64SETEQstore) 50512 v.AuxInt = off 50513 v.Aux = sym 50514 v.AddArg(ptr) 50515 v.AddArg(x) 50516 v.AddArg(mem) 50517 return true 50518 } 50519 // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem) 50520 // cond: is32Bit(off1+off2) 50521 // result: (SETEQstore [off1+off2] {sym} base val mem) 50522 for { 50523 off1 := v.AuxInt 50524 sym := v.Aux 50525 _ = v.Args[2] 50526 v_0 := v.Args[0] 50527 if v_0.Op != OpAMD64ADDQconst { 50528 break 50529 } 50530 off2 := v_0.AuxInt 50531 base := v_0.Args[0] 50532 val := v.Args[1] 50533 mem := v.Args[2] 50534 if !(is32Bit(off1 + off2)) { 50535 break 50536 } 50537 v.reset(OpAMD64SETEQstore) 50538 v.AuxInt = off1 + off2 50539 v.Aux = sym 50540 v.AddArg(base) 50541 v.AddArg(val) 50542 v.AddArg(mem) 50543 return true 50544 } 50545 // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 50546 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 50547 // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 50548 for { 50549 off1 := v.AuxInt 50550 sym1 := v.Aux 50551 _ = v.Args[2] 50552 v_0 := v.Args[0] 50553 if v_0.Op != OpAMD64LEAQ { 50554 break 50555 } 50556 off2 := v_0.AuxInt 50557 sym2 := v_0.Aux 50558 base := v_0.Args[0] 50559 val := v.Args[1] 50560 mem := v.Args[2] 50561 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 50562 break 50563 } 50564 v.reset(OpAMD64SETEQstore) 50565 v.AuxInt = off1 + off2 50566 v.Aux = mergeSym(sym1, sym2) 50567 v.AddArg(base) 50568 v.AddArg(val) 50569 v.AddArg(mem) 50570 return true 50571 } 50572 // match: (SETEQstore [off] {sym} ptr x:(FlagEQ) mem) 
50573 // cond: 50574 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 50575 for { 50576 off := v.AuxInt 50577 sym := v.Aux 50578 _ = v.Args[2] 50579 ptr := v.Args[0] 50580 x := v.Args[1] 50581 if x.Op != OpAMD64FlagEQ { 50582 break 50583 } 50584 mem := v.Args[2] 50585 v.reset(OpAMD64MOVBstore) 50586 v.AuxInt = off 50587 v.Aux = sym 50588 v.AddArg(ptr) 50589 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 50590 v0.AuxInt = 1 50591 v.AddArg(v0) 50592 v.AddArg(mem) 50593 return true 50594 } 50595 // match: (SETEQstore [off] {sym} ptr x:(FlagLT_ULT) mem) 50596 // cond: 50597 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 50598 for { 50599 off := v.AuxInt 50600 sym := v.Aux 50601 _ = v.Args[2] 50602 ptr := v.Args[0] 50603 x := v.Args[1] 50604 if x.Op != OpAMD64FlagLT_ULT { 50605 break 50606 } 50607 mem := v.Args[2] 50608 v.reset(OpAMD64MOVBstore) 50609 v.AuxInt = off 50610 v.Aux = sym 50611 v.AddArg(ptr) 50612 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 50613 v0.AuxInt = 0 50614 v.AddArg(v0) 50615 v.AddArg(mem) 50616 return true 50617 } 50618 // match: (SETEQstore [off] {sym} ptr x:(FlagLT_UGT) mem) 50619 // cond: 50620 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 50621 for { 50622 off := v.AuxInt 50623 sym := v.Aux 50624 _ = v.Args[2] 50625 ptr := v.Args[0] 50626 x := v.Args[1] 50627 if x.Op != OpAMD64FlagLT_UGT { 50628 break 50629 } 50630 mem := v.Args[2] 50631 v.reset(OpAMD64MOVBstore) 50632 v.AuxInt = off 50633 v.Aux = sym 50634 v.AddArg(ptr) 50635 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 50636 v0.AuxInt = 0 50637 v.AddArg(v0) 50638 v.AddArg(mem) 50639 return true 50640 } 50641 // match: (SETEQstore [off] {sym} ptr x:(FlagGT_ULT) mem) 50642 // cond: 50643 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 50644 for { 50645 off := v.AuxInt 50646 sym := v.Aux 50647 _ = v.Args[2] 50648 ptr := v.Args[0] 50649 x := v.Args[1] 50650 if x.Op != OpAMD64FlagGT_ULT { 50651 break 50652 } 50653 mem := v.Args[2] 50654 v.reset(OpAMD64MOVBstore) 50655 v.AuxInt = off 50656 v.Aux = sym 50657 v.AddArg(ptr) 50658 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 50659 v0.AuxInt = 0 50660 v.AddArg(v0) 50661 v.AddArg(mem) 50662 return true 50663 } 50664 // match: (SETEQstore [off] {sym} ptr x:(FlagGT_UGT) mem) 50665 // cond: 50666 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 50667 for { 50668 off := v.AuxInt 50669 sym := v.Aux 50670 _ = v.Args[2] 50671 ptr := v.Args[0] 50672 x := v.Args[1] 50673 if x.Op != OpAMD64FlagGT_UGT { 50674 break 50675 } 50676 mem := v.Args[2] 50677 v.reset(OpAMD64MOVBstore) 50678 v.AuxInt = off 50679 v.Aux = sym 50680 v.AddArg(ptr) 50681 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 50682 v0.AuxInt = 0 50683 v.AddArg(v0) 50684 v.AddArg(mem) 50685 return true 50686 } 50687 return false 50688 } 50689 func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { 50690 // match: (SETG (InvertFlags x)) 50691 // cond: 50692 // result: (SETL x) 50693 for { 50694 v_0 := v.Args[0] 50695 if v_0.Op != OpAMD64InvertFlags { 50696 break 50697 } 50698 x := v_0.Args[0] 50699 v.reset(OpAMD64SETL) 50700 v.AddArg(x) 50701 return true 50702 } 50703 // match: (SETG (FlagEQ)) 50704 // cond: 50705 // result: (MOVLconst [0]) 50706 for { 50707 v_0 := v.Args[0] 50708 if v_0.Op != OpAMD64FlagEQ { 50709 break 50710 } 50711 v.reset(OpAMD64MOVLconst) 50712 v.AuxInt = 0 50713 return true 50714 } 50715 // match: (SETG (FlagLT_ULT)) 50716 // cond: 50717 // result: (MOVLconst [0]) 50718 for { 50719 
v_0 := v.Args[0] 50720 if v_0.Op != OpAMD64FlagLT_ULT { 50721 break 50722 } 50723 v.reset(OpAMD64MOVLconst) 50724 v.AuxInt = 0 50725 return true 50726 } 50727 // match: (SETG (FlagLT_UGT)) 50728 // cond: 50729 // result: (MOVLconst [0]) 50730 for { 50731 v_0 := v.Args[0] 50732 if v_0.Op != OpAMD64FlagLT_UGT { 50733 break 50734 } 50735 v.reset(OpAMD64MOVLconst) 50736 v.AuxInt = 0 50737 return true 50738 } 50739 // match: (SETG (FlagGT_ULT)) 50740 // cond: 50741 // result: (MOVLconst [1]) 50742 for { 50743 v_0 := v.Args[0] 50744 if v_0.Op != OpAMD64FlagGT_ULT { 50745 break 50746 } 50747 v.reset(OpAMD64MOVLconst) 50748 v.AuxInt = 1 50749 return true 50750 } 50751 // match: (SETG (FlagGT_UGT)) 50752 // cond: 50753 // result: (MOVLconst [1]) 50754 for { 50755 v_0 := v.Args[0] 50756 if v_0.Op != OpAMD64FlagGT_UGT { 50757 break 50758 } 50759 v.reset(OpAMD64MOVLconst) 50760 v.AuxInt = 1 50761 return true 50762 } 50763 return false 50764 } 50765 func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { 50766 // match: (SETGE (InvertFlags x)) 50767 // cond: 50768 // result: (SETLE x) 50769 for { 50770 v_0 := v.Args[0] 50771 if v_0.Op != OpAMD64InvertFlags { 50772 break 50773 } 50774 x := v_0.Args[0] 50775 v.reset(OpAMD64SETLE) 50776 v.AddArg(x) 50777 return true 50778 } 50779 // match: (SETGE (FlagEQ)) 50780 // cond: 50781 // result: (MOVLconst [1]) 50782 for { 50783 v_0 := v.Args[0] 50784 if v_0.Op != OpAMD64FlagEQ { 50785 break 50786 } 50787 v.reset(OpAMD64MOVLconst) 50788 v.AuxInt = 1 50789 return true 50790 } 50791 // match: (SETGE (FlagLT_ULT)) 50792 // cond: 50793 // result: (MOVLconst [0]) 50794 for { 50795 v_0 := v.Args[0] 50796 if v_0.Op != OpAMD64FlagLT_ULT { 50797 break 50798 } 50799 v.reset(OpAMD64MOVLconst) 50800 v.AuxInt = 0 50801 return true 50802 } 50803 // match: (SETGE (FlagLT_UGT)) 50804 // cond: 50805 // result: (MOVLconst [0]) 50806 for { 50807 v_0 := v.Args[0] 50808 if v_0.Op != OpAMD64FlagLT_UGT { 50809 break 50810 } 50811 v.reset(OpAMD64MOVLconst) 50812 v.AuxInt = 0 50813 return true 50814 } 50815 // match: (SETGE (FlagGT_ULT)) 50816 // cond: 50817 // result: (MOVLconst [1]) 50818 for { 50819 v_0 := v.Args[0] 50820 if v_0.Op != OpAMD64FlagGT_ULT { 50821 break 50822 } 50823 v.reset(OpAMD64MOVLconst) 50824 v.AuxInt = 1 50825 return true 50826 } 50827 // match: (SETGE (FlagGT_UGT)) 50828 // cond: 50829 // result: (MOVLconst [1]) 50830 for { 50831 v_0 := v.Args[0] 50832 if v_0.Op != OpAMD64FlagGT_UGT { 50833 break 50834 } 50835 v.reset(OpAMD64MOVLconst) 50836 v.AuxInt = 1 50837 return true 50838 } 50839 return false 50840 } 50841 func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { 50842 b := v.Block 50843 _ = b 50844 // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem) 50845 // cond: 50846 // result: (SETLEstore [off] {sym} ptr x mem) 50847 for { 50848 off := v.AuxInt 50849 sym := v.Aux 50850 _ = v.Args[2] 50851 ptr := v.Args[0] 50852 v_1 := v.Args[1] 50853 if v_1.Op != OpAMD64InvertFlags { 50854 break 50855 } 50856 x := v_1.Args[0] 50857 mem := v.Args[2] 50858 v.reset(OpAMD64SETLEstore) 50859 v.AuxInt = off 50860 v.Aux = sym 50861 v.AddArg(ptr) 50862 v.AddArg(x) 50863 v.AddArg(mem) 50864 return true 50865 } 50866 // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem) 50867 // cond: is32Bit(off1+off2) 50868 // result: (SETGEstore [off1+off2] {sym} base val mem) 50869 for { 50870 off1 := v.AuxInt 50871 sym := v.Aux 50872 _ = v.Args[2] 50873 v_0 := v.Args[0] 50874 if v_0.Op != OpAMD64ADDQconst { 50875 break 50876 } 50877 off2 := v_0.AuxInt 50878 base := 
v_0.Args[0] 50879 val := v.Args[1] 50880 mem := v.Args[2] 50881 if !(is32Bit(off1 + off2)) { 50882 break 50883 } 50884 v.reset(OpAMD64SETGEstore) 50885 v.AuxInt = off1 + off2 50886 v.Aux = sym 50887 v.AddArg(base) 50888 v.AddArg(val) 50889 v.AddArg(mem) 50890 return true 50891 } 50892 // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 50893 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 50894 // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 50895 for { 50896 off1 := v.AuxInt 50897 sym1 := v.Aux 50898 _ = v.Args[2] 50899 v_0 := v.Args[0] 50900 if v_0.Op != OpAMD64LEAQ { 50901 break 50902 } 50903 off2 := v_0.AuxInt 50904 sym2 := v_0.Aux 50905 base := v_0.Args[0] 50906 val := v.Args[1] 50907 mem := v.Args[2] 50908 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 50909 break 50910 } 50911 v.reset(OpAMD64SETGEstore) 50912 v.AuxInt = off1 + off2 50913 v.Aux = mergeSym(sym1, sym2) 50914 v.AddArg(base) 50915 v.AddArg(val) 50916 v.AddArg(mem) 50917 return true 50918 } 50919 // match: (SETGEstore [off] {sym} ptr x:(FlagEQ) mem) 50920 // cond: 50921 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 50922 for { 50923 off := v.AuxInt 50924 sym := v.Aux 50925 _ = v.Args[2] 50926 ptr := v.Args[0] 50927 x := v.Args[1] 50928 if x.Op != OpAMD64FlagEQ { 50929 break 50930 } 50931 mem := v.Args[2] 50932 v.reset(OpAMD64MOVBstore) 50933 v.AuxInt = off 50934 v.Aux = sym 50935 v.AddArg(ptr) 50936 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 50937 v0.AuxInt = 1 50938 v.AddArg(v0) 50939 v.AddArg(mem) 50940 return true 50941 } 50942 // match: (SETGEstore [off] {sym} ptr x:(FlagLT_ULT) mem) 50943 // cond: 50944 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 50945 for { 50946 off := v.AuxInt 50947 sym := v.Aux 50948 _ = v.Args[2] 50949 ptr := v.Args[0] 50950 x := v.Args[1] 50951 if x.Op != OpAMD64FlagLT_ULT { 50952 break 50953 } 50954 mem := v.Args[2] 50955 v.reset(OpAMD64MOVBstore) 50956 v.AuxInt = off 50957 v.Aux = sym 50958 v.AddArg(ptr) 50959 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 50960 v0.AuxInt = 0 50961 v.AddArg(v0) 50962 v.AddArg(mem) 50963 return true 50964 } 50965 // match: (SETGEstore [off] {sym} ptr x:(FlagLT_UGT) mem) 50966 // cond: 50967 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 50968 for { 50969 off := v.AuxInt 50970 sym := v.Aux 50971 _ = v.Args[2] 50972 ptr := v.Args[0] 50973 x := v.Args[1] 50974 if x.Op != OpAMD64FlagLT_UGT { 50975 break 50976 } 50977 mem := v.Args[2] 50978 v.reset(OpAMD64MOVBstore) 50979 v.AuxInt = off 50980 v.Aux = sym 50981 v.AddArg(ptr) 50982 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 50983 v0.AuxInt = 0 50984 v.AddArg(v0) 50985 v.AddArg(mem) 50986 return true 50987 } 50988 // match: (SETGEstore [off] {sym} ptr x:(FlagGT_ULT) mem) 50989 // cond: 50990 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 50991 for { 50992 off := v.AuxInt 50993 sym := v.Aux 50994 _ = v.Args[2] 50995 ptr := v.Args[0] 50996 x := v.Args[1] 50997 if x.Op != OpAMD64FlagGT_ULT { 50998 break 50999 } 51000 mem := v.Args[2] 51001 v.reset(OpAMD64MOVBstore) 51002 v.AuxInt = off 51003 v.Aux = sym 51004 v.AddArg(ptr) 51005 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51006 v0.AuxInt = 1 51007 v.AddArg(v0) 51008 v.AddArg(mem) 51009 return true 51010 } 51011 // match: (SETGEstore [off] {sym} ptr x:(FlagGT_UGT) mem) 51012 // cond: 51013 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 51014 for { 51015 off := v.AuxInt 51016 sym := 
v.Aux 51017 _ = v.Args[2] 51018 ptr := v.Args[0] 51019 x := v.Args[1] 51020 if x.Op != OpAMD64FlagGT_UGT { 51021 break 51022 } 51023 mem := v.Args[2] 51024 v.reset(OpAMD64MOVBstore) 51025 v.AuxInt = off 51026 v.Aux = sym 51027 v.AddArg(ptr) 51028 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51029 v0.AuxInt = 1 51030 v.AddArg(v0) 51031 v.AddArg(mem) 51032 return true 51033 } 51034 return false 51035 } 51036 func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { 51037 b := v.Block 51038 _ = b 51039 // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem) 51040 // cond: 51041 // result: (SETLstore [off] {sym} ptr x mem) 51042 for { 51043 off := v.AuxInt 51044 sym := v.Aux 51045 _ = v.Args[2] 51046 ptr := v.Args[0] 51047 v_1 := v.Args[1] 51048 if v_1.Op != OpAMD64InvertFlags { 51049 break 51050 } 51051 x := v_1.Args[0] 51052 mem := v.Args[2] 51053 v.reset(OpAMD64SETLstore) 51054 v.AuxInt = off 51055 v.Aux = sym 51056 v.AddArg(ptr) 51057 v.AddArg(x) 51058 v.AddArg(mem) 51059 return true 51060 } 51061 // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem) 51062 // cond: is32Bit(off1+off2) 51063 // result: (SETGstore [off1+off2] {sym} base val mem) 51064 for { 51065 off1 := v.AuxInt 51066 sym := v.Aux 51067 _ = v.Args[2] 51068 v_0 := v.Args[0] 51069 if v_0.Op != OpAMD64ADDQconst { 51070 break 51071 } 51072 off2 := v_0.AuxInt 51073 base := v_0.Args[0] 51074 val := v.Args[1] 51075 mem := v.Args[2] 51076 if !(is32Bit(off1 + off2)) { 51077 break 51078 } 51079 v.reset(OpAMD64SETGstore) 51080 v.AuxInt = off1 + off2 51081 v.Aux = sym 51082 v.AddArg(base) 51083 v.AddArg(val) 51084 v.AddArg(mem) 51085 return true 51086 } 51087 // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 51088 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 51089 // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 51090 for { 51091 off1 := v.AuxInt 51092 sym1 := v.Aux 51093 _ = v.Args[2] 51094 v_0 := v.Args[0] 51095 if v_0.Op != OpAMD64LEAQ { 51096 break 51097 } 51098 off2 := v_0.AuxInt 51099 sym2 := v_0.Aux 51100 base := v_0.Args[0] 51101 val := v.Args[1] 51102 mem := v.Args[2] 51103 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 51104 break 51105 } 51106 v.reset(OpAMD64SETGstore) 51107 v.AuxInt = off1 + off2 51108 v.Aux = mergeSym(sym1, sym2) 51109 v.AddArg(base) 51110 v.AddArg(val) 51111 v.AddArg(mem) 51112 return true 51113 } 51114 // match: (SETGstore [off] {sym} ptr x:(FlagEQ) mem) 51115 // cond: 51116 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 51117 for { 51118 off := v.AuxInt 51119 sym := v.Aux 51120 _ = v.Args[2] 51121 ptr := v.Args[0] 51122 x := v.Args[1] 51123 if x.Op != OpAMD64FlagEQ { 51124 break 51125 } 51126 mem := v.Args[2] 51127 v.reset(OpAMD64MOVBstore) 51128 v.AuxInt = off 51129 v.Aux = sym 51130 v.AddArg(ptr) 51131 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51132 v0.AuxInt = 0 51133 v.AddArg(v0) 51134 v.AddArg(mem) 51135 return true 51136 } 51137 // match: (SETGstore [off] {sym} ptr x:(FlagLT_ULT) mem) 51138 // cond: 51139 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 51140 for { 51141 off := v.AuxInt 51142 sym := v.Aux 51143 _ = v.Args[2] 51144 ptr := v.Args[0] 51145 x := v.Args[1] 51146 if x.Op != OpAMD64FlagLT_ULT { 51147 break 51148 } 51149 mem := v.Args[2] 51150 v.reset(OpAMD64MOVBstore) 51151 v.AuxInt = off 51152 v.Aux = sym 51153 v.AddArg(ptr) 51154 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51155 v0.AuxInt = 0 51156 v.AddArg(v0) 51157 v.AddArg(mem) 51158 return true 
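// The SETGstore cases in this function follow the same three-part scheme
// as the other SET<cond>store helpers: InvertFlags is folded by mirroring
// the signed condition (SETGstore <-> SETLstore), constant offsets from
// ADDQconst and LEAQ are merged into the store address (with compatible
// symbols combined via mergeSym), and statically known flag values
// (FlagEQ, FlagLT_*, FlagGT_*) collapse the conditional store into a
// plain MOVBstore of 0 or 1. The address merge is guarded so the
// displacement stays encodable; roughly (illustrative sketch only):
//
//	// (SETGstore [off1] (ADDQconst [off2] base) val mem)
//	//   => (SETGstore [off1+off2] base val mem)
//	// only when is32Bit(off1+off2), since an x86-64 addressing mode
//	// carries at most a signed 32-bit displacement.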
51159 } 51160 // match: (SETGstore [off] {sym} ptr x:(FlagLT_UGT) mem) 51161 // cond: 51162 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 51163 for { 51164 off := v.AuxInt 51165 sym := v.Aux 51166 _ = v.Args[2] 51167 ptr := v.Args[0] 51168 x := v.Args[1] 51169 if x.Op != OpAMD64FlagLT_UGT { 51170 break 51171 } 51172 mem := v.Args[2] 51173 v.reset(OpAMD64MOVBstore) 51174 v.AuxInt = off 51175 v.Aux = sym 51176 v.AddArg(ptr) 51177 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51178 v0.AuxInt = 0 51179 v.AddArg(v0) 51180 v.AddArg(mem) 51181 return true 51182 } 51183 // match: (SETGstore [off] {sym} ptr x:(FlagGT_ULT) mem) 51184 // cond: 51185 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 51186 for { 51187 off := v.AuxInt 51188 sym := v.Aux 51189 _ = v.Args[2] 51190 ptr := v.Args[0] 51191 x := v.Args[1] 51192 if x.Op != OpAMD64FlagGT_ULT { 51193 break 51194 } 51195 mem := v.Args[2] 51196 v.reset(OpAMD64MOVBstore) 51197 v.AuxInt = off 51198 v.Aux = sym 51199 v.AddArg(ptr) 51200 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51201 v0.AuxInt = 1 51202 v.AddArg(v0) 51203 v.AddArg(mem) 51204 return true 51205 } 51206 // match: (SETGstore [off] {sym} ptr x:(FlagGT_UGT) mem) 51207 // cond: 51208 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 51209 for { 51210 off := v.AuxInt 51211 sym := v.Aux 51212 _ = v.Args[2] 51213 ptr := v.Args[0] 51214 x := v.Args[1] 51215 if x.Op != OpAMD64FlagGT_UGT { 51216 break 51217 } 51218 mem := v.Args[2] 51219 v.reset(OpAMD64MOVBstore) 51220 v.AuxInt = off 51221 v.Aux = sym 51222 v.AddArg(ptr) 51223 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51224 v0.AuxInt = 1 51225 v.AddArg(v0) 51226 v.AddArg(mem) 51227 return true 51228 } 51229 return false 51230 } 51231 func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { 51232 // match: (SETL (InvertFlags x)) 51233 // cond: 51234 // result: (SETG x) 51235 for { 51236 v_0 := v.Args[0] 51237 if v_0.Op != OpAMD64InvertFlags { 51238 break 51239 } 51240 x := v_0.Args[0] 51241 v.reset(OpAMD64SETG) 51242 v.AddArg(x) 51243 return true 51244 } 51245 // match: (SETL (FlagEQ)) 51246 // cond: 51247 // result: (MOVLconst [0]) 51248 for { 51249 v_0 := v.Args[0] 51250 if v_0.Op != OpAMD64FlagEQ { 51251 break 51252 } 51253 v.reset(OpAMD64MOVLconst) 51254 v.AuxInt = 0 51255 return true 51256 } 51257 // match: (SETL (FlagLT_ULT)) 51258 // cond: 51259 // result: (MOVLconst [1]) 51260 for { 51261 v_0 := v.Args[0] 51262 if v_0.Op != OpAMD64FlagLT_ULT { 51263 break 51264 } 51265 v.reset(OpAMD64MOVLconst) 51266 v.AuxInt = 1 51267 return true 51268 } 51269 // match: (SETL (FlagLT_UGT)) 51270 // cond: 51271 // result: (MOVLconst [1]) 51272 for { 51273 v_0 := v.Args[0] 51274 if v_0.Op != OpAMD64FlagLT_UGT { 51275 break 51276 } 51277 v.reset(OpAMD64MOVLconst) 51278 v.AuxInt = 1 51279 return true 51280 } 51281 // match: (SETL (FlagGT_ULT)) 51282 // cond: 51283 // result: (MOVLconst [0]) 51284 for { 51285 v_0 := v.Args[0] 51286 if v_0.Op != OpAMD64FlagGT_ULT { 51287 break 51288 } 51289 v.reset(OpAMD64MOVLconst) 51290 v.AuxInt = 0 51291 return true 51292 } 51293 // match: (SETL (FlagGT_UGT)) 51294 // cond: 51295 // result: (MOVLconst [0]) 51296 for { 51297 v_0 := v.Args[0] 51298 if v_0.Op != OpAMD64FlagGT_UGT { 51299 break 51300 } 51301 v.reset(OpAMD64MOVLconst) 51302 v.AuxInt = 0 51303 return true 51304 } 51305 return false 51306 } 51307 func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { 51308 // match: (SETLE (InvertFlags x)) 51309 // cond: 51310 // result: (SETGE x) 51311 
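// InvertFlags represents the same comparison with its operands swapped,
// so folding it through a SETcc mirrors the condition: SETLE becomes
// SETGE here (and SETL/SETG likewise), while SETEQ and SETNE pass
// through unchanged because equality is symmetric. In effect
// (illustrative, for flags produced by a compare):
//
//	// With x = (CMPQ a b), InvertFlags x behaves like (CMPQ b a),
//	// and "b <= a" is the same predicate as "a >= b" -- hence SETGE x.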
for { 51312 v_0 := v.Args[0] 51313 if v_0.Op != OpAMD64InvertFlags { 51314 break 51315 } 51316 x := v_0.Args[0] 51317 v.reset(OpAMD64SETGE) 51318 v.AddArg(x) 51319 return true 51320 } 51321 // match: (SETLE (FlagEQ)) 51322 // cond: 51323 // result: (MOVLconst [1]) 51324 for { 51325 v_0 := v.Args[0] 51326 if v_0.Op != OpAMD64FlagEQ { 51327 break 51328 } 51329 v.reset(OpAMD64MOVLconst) 51330 v.AuxInt = 1 51331 return true 51332 } 51333 // match: (SETLE (FlagLT_ULT)) 51334 // cond: 51335 // result: (MOVLconst [1]) 51336 for { 51337 v_0 := v.Args[0] 51338 if v_0.Op != OpAMD64FlagLT_ULT { 51339 break 51340 } 51341 v.reset(OpAMD64MOVLconst) 51342 v.AuxInt = 1 51343 return true 51344 } 51345 // match: (SETLE (FlagLT_UGT)) 51346 // cond: 51347 // result: (MOVLconst [1]) 51348 for { 51349 v_0 := v.Args[0] 51350 if v_0.Op != OpAMD64FlagLT_UGT { 51351 break 51352 } 51353 v.reset(OpAMD64MOVLconst) 51354 v.AuxInt = 1 51355 return true 51356 } 51357 // match: (SETLE (FlagGT_ULT)) 51358 // cond: 51359 // result: (MOVLconst [0]) 51360 for { 51361 v_0 := v.Args[0] 51362 if v_0.Op != OpAMD64FlagGT_ULT { 51363 break 51364 } 51365 v.reset(OpAMD64MOVLconst) 51366 v.AuxInt = 0 51367 return true 51368 } 51369 // match: (SETLE (FlagGT_UGT)) 51370 // cond: 51371 // result: (MOVLconst [0]) 51372 for { 51373 v_0 := v.Args[0] 51374 if v_0.Op != OpAMD64FlagGT_UGT { 51375 break 51376 } 51377 v.reset(OpAMD64MOVLconst) 51378 v.AuxInt = 0 51379 return true 51380 } 51381 return false 51382 } 51383 func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { 51384 b := v.Block 51385 _ = b 51386 // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem) 51387 // cond: 51388 // result: (SETGEstore [off] {sym} ptr x mem) 51389 for { 51390 off := v.AuxInt 51391 sym := v.Aux 51392 _ = v.Args[2] 51393 ptr := v.Args[0] 51394 v_1 := v.Args[1] 51395 if v_1.Op != OpAMD64InvertFlags { 51396 break 51397 } 51398 x := v_1.Args[0] 51399 mem := v.Args[2] 51400 v.reset(OpAMD64SETGEstore) 51401 v.AuxInt = off 51402 v.Aux = sym 51403 v.AddArg(ptr) 51404 v.AddArg(x) 51405 v.AddArg(mem) 51406 return true 51407 } 51408 // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem) 51409 // cond: is32Bit(off1+off2) 51410 // result: (SETLEstore [off1+off2] {sym} base val mem) 51411 for { 51412 off1 := v.AuxInt 51413 sym := v.Aux 51414 _ = v.Args[2] 51415 v_0 := v.Args[0] 51416 if v_0.Op != OpAMD64ADDQconst { 51417 break 51418 } 51419 off2 := v_0.AuxInt 51420 base := v_0.Args[0] 51421 val := v.Args[1] 51422 mem := v.Args[2] 51423 if !(is32Bit(off1 + off2)) { 51424 break 51425 } 51426 v.reset(OpAMD64SETLEstore) 51427 v.AuxInt = off1 + off2 51428 v.Aux = sym 51429 v.AddArg(base) 51430 v.AddArg(val) 51431 v.AddArg(mem) 51432 return true 51433 } 51434 // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 51435 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 51436 // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 51437 for { 51438 off1 := v.AuxInt 51439 sym1 := v.Aux 51440 _ = v.Args[2] 51441 v_0 := v.Args[0] 51442 if v_0.Op != OpAMD64LEAQ { 51443 break 51444 } 51445 off2 := v_0.AuxInt 51446 sym2 := v_0.Aux 51447 base := v_0.Args[0] 51448 val := v.Args[1] 51449 mem := v.Args[2] 51450 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 51451 break 51452 } 51453 v.reset(OpAMD64SETLEstore) 51454 v.AuxInt = off1 + off2 51455 v.Aux = mergeSym(sym1, sym2) 51456 v.AddArg(base) 51457 v.AddArg(val) 51458 v.AddArg(mem) 51459 return true 51460 } 51461 // match: (SETLEstore [off] {sym} ptr x:(FlagEQ) mem) 51462 // 
cond: 51463 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 51464 for { 51465 off := v.AuxInt 51466 sym := v.Aux 51467 _ = v.Args[2] 51468 ptr := v.Args[0] 51469 x := v.Args[1] 51470 if x.Op != OpAMD64FlagEQ { 51471 break 51472 } 51473 mem := v.Args[2] 51474 v.reset(OpAMD64MOVBstore) 51475 v.AuxInt = off 51476 v.Aux = sym 51477 v.AddArg(ptr) 51478 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51479 v0.AuxInt = 1 51480 v.AddArg(v0) 51481 v.AddArg(mem) 51482 return true 51483 } 51484 // match: (SETLEstore [off] {sym} ptr x:(FlagLT_ULT) mem) 51485 // cond: 51486 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 51487 for { 51488 off := v.AuxInt 51489 sym := v.Aux 51490 _ = v.Args[2] 51491 ptr := v.Args[0] 51492 x := v.Args[1] 51493 if x.Op != OpAMD64FlagLT_ULT { 51494 break 51495 } 51496 mem := v.Args[2] 51497 v.reset(OpAMD64MOVBstore) 51498 v.AuxInt = off 51499 v.Aux = sym 51500 v.AddArg(ptr) 51501 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51502 v0.AuxInt = 1 51503 v.AddArg(v0) 51504 v.AddArg(mem) 51505 return true 51506 } 51507 // match: (SETLEstore [off] {sym} ptr x:(FlagLT_UGT) mem) 51508 // cond: 51509 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 51510 for { 51511 off := v.AuxInt 51512 sym := v.Aux 51513 _ = v.Args[2] 51514 ptr := v.Args[0] 51515 x := v.Args[1] 51516 if x.Op != OpAMD64FlagLT_UGT { 51517 break 51518 } 51519 mem := v.Args[2] 51520 v.reset(OpAMD64MOVBstore) 51521 v.AuxInt = off 51522 v.Aux = sym 51523 v.AddArg(ptr) 51524 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51525 v0.AuxInt = 1 51526 v.AddArg(v0) 51527 v.AddArg(mem) 51528 return true 51529 } 51530 // match: (SETLEstore [off] {sym} ptr x:(FlagGT_ULT) mem) 51531 // cond: 51532 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 51533 for { 51534 off := v.AuxInt 51535 sym := v.Aux 51536 _ = v.Args[2] 51537 ptr := v.Args[0] 51538 x := v.Args[1] 51539 if x.Op != OpAMD64FlagGT_ULT { 51540 break 51541 } 51542 mem := v.Args[2] 51543 v.reset(OpAMD64MOVBstore) 51544 v.AuxInt = off 51545 v.Aux = sym 51546 v.AddArg(ptr) 51547 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51548 v0.AuxInt = 0 51549 v.AddArg(v0) 51550 v.AddArg(mem) 51551 return true 51552 } 51553 // match: (SETLEstore [off] {sym} ptr x:(FlagGT_UGT) mem) 51554 // cond: 51555 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 51556 for { 51557 off := v.AuxInt 51558 sym := v.Aux 51559 _ = v.Args[2] 51560 ptr := v.Args[0] 51561 x := v.Args[1] 51562 if x.Op != OpAMD64FlagGT_UGT { 51563 break 51564 } 51565 mem := v.Args[2] 51566 v.reset(OpAMD64MOVBstore) 51567 v.AuxInt = off 51568 v.Aux = sym 51569 v.AddArg(ptr) 51570 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51571 v0.AuxInt = 0 51572 v.AddArg(v0) 51573 v.AddArg(mem) 51574 return true 51575 } 51576 return false 51577 } 51578 func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { 51579 b := v.Block 51580 _ = b 51581 // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem) 51582 // cond: 51583 // result: (SETGstore [off] {sym} ptr x mem) 51584 for { 51585 off := v.AuxInt 51586 sym := v.Aux 51587 _ = v.Args[2] 51588 ptr := v.Args[0] 51589 v_1 := v.Args[1] 51590 if v_1.Op != OpAMD64InvertFlags { 51591 break 51592 } 51593 x := v_1.Args[0] 51594 mem := v.Args[2] 51595 v.reset(OpAMD64SETGstore) 51596 v.AuxInt = off 51597 v.Aux = sym 51598 v.AddArg(ptr) 51599 v.AddArg(x) 51600 v.AddArg(mem) 51601 return true 51602 } 51603 // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem) 51604 // 
cond: is32Bit(off1+off2) 51605 // result: (SETLstore [off1+off2] {sym} base val mem) 51606 for { 51607 off1 := v.AuxInt 51608 sym := v.Aux 51609 _ = v.Args[2] 51610 v_0 := v.Args[0] 51611 if v_0.Op != OpAMD64ADDQconst { 51612 break 51613 } 51614 off2 := v_0.AuxInt 51615 base := v_0.Args[0] 51616 val := v.Args[1] 51617 mem := v.Args[2] 51618 if !(is32Bit(off1 + off2)) { 51619 break 51620 } 51621 v.reset(OpAMD64SETLstore) 51622 v.AuxInt = off1 + off2 51623 v.Aux = sym 51624 v.AddArg(base) 51625 v.AddArg(val) 51626 v.AddArg(mem) 51627 return true 51628 } 51629 // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 51630 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 51631 // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 51632 for { 51633 off1 := v.AuxInt 51634 sym1 := v.Aux 51635 _ = v.Args[2] 51636 v_0 := v.Args[0] 51637 if v_0.Op != OpAMD64LEAQ { 51638 break 51639 } 51640 off2 := v_0.AuxInt 51641 sym2 := v_0.Aux 51642 base := v_0.Args[0] 51643 val := v.Args[1] 51644 mem := v.Args[2] 51645 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 51646 break 51647 } 51648 v.reset(OpAMD64SETLstore) 51649 v.AuxInt = off1 + off2 51650 v.Aux = mergeSym(sym1, sym2) 51651 v.AddArg(base) 51652 v.AddArg(val) 51653 v.AddArg(mem) 51654 return true 51655 } 51656 // match: (SETLstore [off] {sym} ptr x:(FlagEQ) mem) 51657 // cond: 51658 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 51659 for { 51660 off := v.AuxInt 51661 sym := v.Aux 51662 _ = v.Args[2] 51663 ptr := v.Args[0] 51664 x := v.Args[1] 51665 if x.Op != OpAMD64FlagEQ { 51666 break 51667 } 51668 mem := v.Args[2] 51669 v.reset(OpAMD64MOVBstore) 51670 v.AuxInt = off 51671 v.Aux = sym 51672 v.AddArg(ptr) 51673 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51674 v0.AuxInt = 0 51675 v.AddArg(v0) 51676 v.AddArg(mem) 51677 return true 51678 } 51679 // match: (SETLstore [off] {sym} ptr x:(FlagLT_ULT) mem) 51680 // cond: 51681 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 51682 for { 51683 off := v.AuxInt 51684 sym := v.Aux 51685 _ = v.Args[2] 51686 ptr := v.Args[0] 51687 x := v.Args[1] 51688 if x.Op != OpAMD64FlagLT_ULT { 51689 break 51690 } 51691 mem := v.Args[2] 51692 v.reset(OpAMD64MOVBstore) 51693 v.AuxInt = off 51694 v.Aux = sym 51695 v.AddArg(ptr) 51696 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51697 v0.AuxInt = 1 51698 v.AddArg(v0) 51699 v.AddArg(mem) 51700 return true 51701 } 51702 // match: (SETLstore [off] {sym} ptr x:(FlagLT_UGT) mem) 51703 // cond: 51704 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 51705 for { 51706 off := v.AuxInt 51707 sym := v.Aux 51708 _ = v.Args[2] 51709 ptr := v.Args[0] 51710 x := v.Args[1] 51711 if x.Op != OpAMD64FlagLT_UGT { 51712 break 51713 } 51714 mem := v.Args[2] 51715 v.reset(OpAMD64MOVBstore) 51716 v.AuxInt = off 51717 v.Aux = sym 51718 v.AddArg(ptr) 51719 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51720 v0.AuxInt = 1 51721 v.AddArg(v0) 51722 v.AddArg(mem) 51723 return true 51724 } 51725 // match: (SETLstore [off] {sym} ptr x:(FlagGT_ULT) mem) 51726 // cond: 51727 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 51728 for { 51729 off := v.AuxInt 51730 sym := v.Aux 51731 _ = v.Args[2] 51732 ptr := v.Args[0] 51733 x := v.Args[1] 51734 if x.Op != OpAMD64FlagGT_ULT { 51735 break 51736 } 51737 mem := v.Args[2] 51738 v.reset(OpAMD64MOVBstore) 51739 v.AuxInt = off 51740 v.Aux = sym 51741 v.AddArg(ptr) 51742 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51743 v0.AuxInt 
= 0 51744 v.AddArg(v0) 51745 v.AddArg(mem) 51746 return true 51747 } 51748 // match: (SETLstore [off] {sym} ptr x:(FlagGT_UGT) mem) 51749 // cond: 51750 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 51751 for { 51752 off := v.AuxInt 51753 sym := v.Aux 51754 _ = v.Args[2] 51755 ptr := v.Args[0] 51756 x := v.Args[1] 51757 if x.Op != OpAMD64FlagGT_UGT { 51758 break 51759 } 51760 mem := v.Args[2] 51761 v.reset(OpAMD64MOVBstore) 51762 v.AuxInt = off 51763 v.Aux = sym 51764 v.AddArg(ptr) 51765 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 51766 v0.AuxInt = 0 51767 v.AddArg(v0) 51768 v.AddArg(mem) 51769 return true 51770 } 51771 return false 51772 } 51773 func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 51774 b := v.Block 51775 _ = b 51776 config := b.Func.Config 51777 _ = config 51778 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 51779 // cond: !config.nacl 51780 // result: (SETB (BTL x y)) 51781 for { 51782 v_0 := v.Args[0] 51783 if v_0.Op != OpAMD64TESTL { 51784 break 51785 } 51786 _ = v_0.Args[1] 51787 v_0_0 := v_0.Args[0] 51788 if v_0_0.Op != OpAMD64SHLL { 51789 break 51790 } 51791 _ = v_0_0.Args[1] 51792 v_0_0_0 := v_0_0.Args[0] 51793 if v_0_0_0.Op != OpAMD64MOVLconst { 51794 break 51795 } 51796 if v_0_0_0.AuxInt != 1 { 51797 break 51798 } 51799 x := v_0_0.Args[1] 51800 y := v_0.Args[1] 51801 if !(!config.nacl) { 51802 break 51803 } 51804 v.reset(OpAMD64SETB) 51805 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 51806 v0.AddArg(x) 51807 v0.AddArg(y) 51808 v.AddArg(v0) 51809 return true 51810 } 51811 // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) 51812 // cond: !config.nacl 51813 // result: (SETB (BTL x y)) 51814 for { 51815 v_0 := v.Args[0] 51816 if v_0.Op != OpAMD64TESTL { 51817 break 51818 } 51819 _ = v_0.Args[1] 51820 y := v_0.Args[0] 51821 v_0_1 := v_0.Args[1] 51822 if v_0_1.Op != OpAMD64SHLL { 51823 break 51824 } 51825 _ = v_0_1.Args[1] 51826 v_0_1_0 := v_0_1.Args[0] 51827 if v_0_1_0.Op != OpAMD64MOVLconst { 51828 break 51829 } 51830 if v_0_1_0.AuxInt != 1 { 51831 break 51832 } 51833 x := v_0_1.Args[1] 51834 if !(!config.nacl) { 51835 break 51836 } 51837 v.reset(OpAMD64SETB) 51838 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 51839 v0.AddArg(x) 51840 v0.AddArg(y) 51841 v.AddArg(v0) 51842 return true 51843 } 51844 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 51845 // cond: !config.nacl 51846 // result: (SETB (BTQ x y)) 51847 for { 51848 v_0 := v.Args[0] 51849 if v_0.Op != OpAMD64TESTQ { 51850 break 51851 } 51852 _ = v_0.Args[1] 51853 v_0_0 := v_0.Args[0] 51854 if v_0_0.Op != OpAMD64SHLQ { 51855 break 51856 } 51857 _ = v_0_0.Args[1] 51858 v_0_0_0 := v_0_0.Args[0] 51859 if v_0_0_0.Op != OpAMD64MOVQconst { 51860 break 51861 } 51862 if v_0_0_0.AuxInt != 1 { 51863 break 51864 } 51865 x := v_0_0.Args[1] 51866 y := v_0.Args[1] 51867 if !(!config.nacl) { 51868 break 51869 } 51870 v.reset(OpAMD64SETB) 51871 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 51872 v0.AddArg(x) 51873 v0.AddArg(y) 51874 v.AddArg(v0) 51875 return true 51876 } 51877 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) 51878 // cond: !config.nacl 51879 // result: (SETB (BTQ x y)) 51880 for { 51881 v_0 := v.Args[0] 51882 if v_0.Op != OpAMD64TESTQ { 51883 break 51884 } 51885 _ = v_0.Args[1] 51886 y := v_0.Args[0] 51887 v_0_1 := v_0.Args[1] 51888 if v_0_1.Op != OpAMD64SHLQ { 51889 break 51890 } 51891 _ = v_0_1.Args[1] 51892 v_0_1_0 := v_0_1.Args[0] 51893 if v_0_1_0.Op != OpAMD64MOVQconst { 51894 break 51895 } 51896 if v_0_1_0.AuxInt != 1 { 51897 break 
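// The TEST-against-a-single-bit patterns in this function all reduce to
// a BT instruction: BT copies the selected bit of its operand into the
// carry flag, so SETNE of the TEST becomes SETB of the BT. This covers
// the (SHL (MOVconst [1]) x) form matched here, the power-of-two
// constants handled via log2/log2uint32 below, and the paired
// SHLconst/SHRconst chains that isolate the sign bit or bit 0. A minimal
// sketch of the predicate being preserved (illustrative helper, not part
// of the generated code):
//
//	func bitSet(y uint64, x uint) bool {
//		return y&(1<<x) != 0 // same truth value as CF after BTQ x, y
//	}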
51898 } 51899 x := v_0_1.Args[1] 51900 if !(!config.nacl) { 51901 break 51902 } 51903 v.reset(OpAMD64SETB) 51904 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 51905 v0.AddArg(x) 51906 v0.AddArg(y) 51907 v.AddArg(v0) 51908 return true 51909 } 51910 // match: (SETNE (TESTLconst [c] x)) 51911 // cond: isUint32PowerOfTwo(c) && !config.nacl 51912 // result: (SETB (BTLconst [log2uint32(c)] x)) 51913 for { 51914 v_0 := v.Args[0] 51915 if v_0.Op != OpAMD64TESTLconst { 51916 break 51917 } 51918 c := v_0.AuxInt 51919 x := v_0.Args[0] 51920 if !(isUint32PowerOfTwo(c) && !config.nacl) { 51921 break 51922 } 51923 v.reset(OpAMD64SETB) 51924 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 51925 v0.AuxInt = log2uint32(c) 51926 v0.AddArg(x) 51927 v.AddArg(v0) 51928 return true 51929 } 51930 // match: (SETNE (TESTQconst [c] x)) 51931 // cond: isUint64PowerOfTwo(c) && !config.nacl 51932 // result: (SETB (BTQconst [log2(c)] x)) 51933 for { 51934 v_0 := v.Args[0] 51935 if v_0.Op != OpAMD64TESTQconst { 51936 break 51937 } 51938 c := v_0.AuxInt 51939 x := v_0.Args[0] 51940 if !(isUint64PowerOfTwo(c) && !config.nacl) { 51941 break 51942 } 51943 v.reset(OpAMD64SETB) 51944 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 51945 v0.AuxInt = log2(c) 51946 v0.AddArg(x) 51947 v.AddArg(v0) 51948 return true 51949 } 51950 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 51951 // cond: isUint64PowerOfTwo(c) && !config.nacl 51952 // result: (SETB (BTQconst [log2(c)] x)) 51953 for { 51954 v_0 := v.Args[0] 51955 if v_0.Op != OpAMD64TESTQ { 51956 break 51957 } 51958 _ = v_0.Args[1] 51959 v_0_0 := v_0.Args[0] 51960 if v_0_0.Op != OpAMD64MOVQconst { 51961 break 51962 } 51963 c := v_0_0.AuxInt 51964 x := v_0.Args[1] 51965 if !(isUint64PowerOfTwo(c) && !config.nacl) { 51966 break 51967 } 51968 v.reset(OpAMD64SETB) 51969 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 51970 v0.AuxInt = log2(c) 51971 v0.AddArg(x) 51972 v.AddArg(v0) 51973 return true 51974 } 51975 // match: (SETNE (TESTQ x (MOVQconst [c]))) 51976 // cond: isUint64PowerOfTwo(c) && !config.nacl 51977 // result: (SETB (BTQconst [log2(c)] x)) 51978 for { 51979 v_0 := v.Args[0] 51980 if v_0.Op != OpAMD64TESTQ { 51981 break 51982 } 51983 _ = v_0.Args[1] 51984 x := v_0.Args[0] 51985 v_0_1 := v_0.Args[1] 51986 if v_0_1.Op != OpAMD64MOVQconst { 51987 break 51988 } 51989 c := v_0_1.AuxInt 51990 if !(isUint64PowerOfTwo(c) && !config.nacl) { 51991 break 51992 } 51993 v.reset(OpAMD64SETB) 51994 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 51995 v0.AuxInt = log2(c) 51996 v0.AddArg(x) 51997 v.AddArg(v0) 51998 return true 51999 } 52000 // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _))) 52001 // cond: 52002 // result: (SETEQ (CMPLconst [0] s)) 52003 for { 52004 v_0 := v.Args[0] 52005 if v_0.Op != OpAMD64CMPLconst { 52006 break 52007 } 52008 if v_0.AuxInt != 1 { 52009 break 52010 } 52011 s := v_0.Args[0] 52012 if s.Op != OpAMD64ANDLconst { 52013 break 52014 } 52015 if s.AuxInt != 1 { 52016 break 52017 } 52018 v.reset(OpAMD64SETEQ) 52019 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 52020 v0.AuxInt = 0 52021 v0.AddArg(s) 52022 v.AddArg(v0) 52023 return true 52024 } 52025 // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _))) 52026 // cond: 52027 // result: (SETEQ (CMPQconst [0] s)) 52028 for { 52029 v_0 := v.Args[0] 52030 if v_0.Op != OpAMD64CMPQconst { 52031 break 52032 } 52033 if v_0.AuxInt != 1 { 52034 break 52035 } 52036 s := v_0.Args[0] 52037 if s.Op != OpAMD64ANDQconst { 52038 break 52039 } 52040 if s.AuxInt != 1 { 
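// (If s is not exactly (ANDQconst [1] _), this rule does not apply.)
// When it does apply, s is always 0 or 1, so "s != 1" and "s == 0" are
// the same predicate; the rewrite therefore flips CMPQconst [1] under
// SETNE into CMPQconst [0] under SETEQ, and comparisons against zero
// fold better downstream. Roughly (illustrative only):
//
//	s := x & 1
//	_ = (s != 1) == (s == 0) // always true, since s is 0 or 1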
52041 break 52042 } 52043 v.reset(OpAMD64SETEQ) 52044 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 52045 v0.AuxInt = 0 52046 v0.AddArg(s) 52047 v.AddArg(v0) 52048 return true 52049 } 52050 return false 52051 } 52052 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { 52053 b := v.Block 52054 _ = b 52055 config := b.Func.Config 52056 _ = config 52057 // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) 52058 // cond: z1==z2 && !config.nacl 52059 // result: (SETB (BTQconst [63] x)) 52060 for { 52061 v_0 := v.Args[0] 52062 if v_0.Op != OpAMD64TESTQ { 52063 break 52064 } 52065 _ = v_0.Args[1] 52066 z1 := v_0.Args[0] 52067 if z1.Op != OpAMD64SHLQconst { 52068 break 52069 } 52070 if z1.AuxInt != 63 { 52071 break 52072 } 52073 z1_0 := z1.Args[0] 52074 if z1_0.Op != OpAMD64SHRQconst { 52075 break 52076 } 52077 if z1_0.AuxInt != 63 { 52078 break 52079 } 52080 x := z1_0.Args[0] 52081 z2 := v_0.Args[1] 52082 if !(z1 == z2 && !config.nacl) { 52083 break 52084 } 52085 v.reset(OpAMD64SETB) 52086 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 52087 v0.AuxInt = 63 52088 v0.AddArg(x) 52089 v.AddArg(v0) 52090 return true 52091 } 52092 // match: (SETNE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) 52093 // cond: z1==z2 && !config.nacl 52094 // result: (SETB (BTQconst [63] x)) 52095 for { 52096 v_0 := v.Args[0] 52097 if v_0.Op != OpAMD64TESTQ { 52098 break 52099 } 52100 _ = v_0.Args[1] 52101 z2 := v_0.Args[0] 52102 z1 := v_0.Args[1] 52103 if z1.Op != OpAMD64SHLQconst { 52104 break 52105 } 52106 if z1.AuxInt != 63 { 52107 break 52108 } 52109 z1_0 := z1.Args[0] 52110 if z1_0.Op != OpAMD64SHRQconst { 52111 break 52112 } 52113 if z1_0.AuxInt != 63 { 52114 break 52115 } 52116 x := z1_0.Args[0] 52117 if !(z1 == z2 && !config.nacl) { 52118 break 52119 } 52120 v.reset(OpAMD64SETB) 52121 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 52122 v0.AuxInt = 63 52123 v0.AddArg(x) 52124 v.AddArg(v0) 52125 return true 52126 } 52127 // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) 52128 // cond: z1==z2 && !config.nacl 52129 // result: (SETB (BTQconst [31] x)) 52130 for { 52131 v_0 := v.Args[0] 52132 if v_0.Op != OpAMD64TESTL { 52133 break 52134 } 52135 _ = v_0.Args[1] 52136 z1 := v_0.Args[0] 52137 if z1.Op != OpAMD64SHLLconst { 52138 break 52139 } 52140 if z1.AuxInt != 31 { 52141 break 52142 } 52143 z1_0 := z1.Args[0] 52144 if z1_0.Op != OpAMD64SHRQconst { 52145 break 52146 } 52147 if z1_0.AuxInt != 31 { 52148 break 52149 } 52150 x := z1_0.Args[0] 52151 z2 := v_0.Args[1] 52152 if !(z1 == z2 && !config.nacl) { 52153 break 52154 } 52155 v.reset(OpAMD64SETB) 52156 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 52157 v0.AuxInt = 31 52158 v0.AddArg(x) 52159 v.AddArg(v0) 52160 return true 52161 } 52162 // match: (SETNE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) 52163 // cond: z1==z2 && !config.nacl 52164 // result: (SETB (BTQconst [31] x)) 52165 for { 52166 v_0 := v.Args[0] 52167 if v_0.Op != OpAMD64TESTL { 52168 break 52169 } 52170 _ = v_0.Args[1] 52171 z2 := v_0.Args[0] 52172 z1 := v_0.Args[1] 52173 if z1.Op != OpAMD64SHLLconst { 52174 break 52175 } 52176 if z1.AuxInt != 31 { 52177 break 52178 } 52179 z1_0 := z1.Args[0] 52180 if z1_0.Op != OpAMD64SHRQconst { 52181 break 52182 } 52183 if z1_0.AuxInt != 31 { 52184 break 52185 } 52186 x := z1_0.Args[0] 52187 if !(z1 == z2 && !config.nacl) { 52188 break 52189 } 52190 v.reset(OpAMD64SETB) 52191 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 52192 v0.AuxInt = 31 52193 
v0.AddArg(x) 52194 v.AddArg(v0) 52195 return true 52196 } 52197 // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) 52198 // cond: z1==z2 && !config.nacl 52199 // result: (SETB (BTQconst [0] x)) 52200 for { 52201 v_0 := v.Args[0] 52202 if v_0.Op != OpAMD64TESTQ { 52203 break 52204 } 52205 _ = v_0.Args[1] 52206 z1 := v_0.Args[0] 52207 if z1.Op != OpAMD64SHRQconst { 52208 break 52209 } 52210 if z1.AuxInt != 63 { 52211 break 52212 } 52213 z1_0 := z1.Args[0] 52214 if z1_0.Op != OpAMD64SHLQconst { 52215 break 52216 } 52217 if z1_0.AuxInt != 63 { 52218 break 52219 } 52220 x := z1_0.Args[0] 52221 z2 := v_0.Args[1] 52222 if !(z1 == z2 && !config.nacl) { 52223 break 52224 } 52225 v.reset(OpAMD64SETB) 52226 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 52227 v0.AuxInt = 0 52228 v0.AddArg(x) 52229 v.AddArg(v0) 52230 return true 52231 } 52232 // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) 52233 // cond: z1==z2 && !config.nacl 52234 // result: (SETB (BTQconst [0] x)) 52235 for { 52236 v_0 := v.Args[0] 52237 if v_0.Op != OpAMD64TESTQ { 52238 break 52239 } 52240 _ = v_0.Args[1] 52241 z2 := v_0.Args[0] 52242 z1 := v_0.Args[1] 52243 if z1.Op != OpAMD64SHRQconst { 52244 break 52245 } 52246 if z1.AuxInt != 63 { 52247 break 52248 } 52249 z1_0 := z1.Args[0] 52250 if z1_0.Op != OpAMD64SHLQconst { 52251 break 52252 } 52253 if z1_0.AuxInt != 63 { 52254 break 52255 } 52256 x := z1_0.Args[0] 52257 if !(z1 == z2 && !config.nacl) { 52258 break 52259 } 52260 v.reset(OpAMD64SETB) 52261 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 52262 v0.AuxInt = 0 52263 v0.AddArg(x) 52264 v.AddArg(v0) 52265 return true 52266 } 52267 // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) 52268 // cond: z1==z2 && !config.nacl 52269 // result: (SETB (BTLconst [0] x)) 52270 for { 52271 v_0 := v.Args[0] 52272 if v_0.Op != OpAMD64TESTL { 52273 break 52274 } 52275 _ = v_0.Args[1] 52276 z1 := v_0.Args[0] 52277 if z1.Op != OpAMD64SHRLconst { 52278 break 52279 } 52280 if z1.AuxInt != 31 { 52281 break 52282 } 52283 z1_0 := z1.Args[0] 52284 if z1_0.Op != OpAMD64SHLLconst { 52285 break 52286 } 52287 if z1_0.AuxInt != 31 { 52288 break 52289 } 52290 x := z1_0.Args[0] 52291 z2 := v_0.Args[1] 52292 if !(z1 == z2 && !config.nacl) { 52293 break 52294 } 52295 v.reset(OpAMD64SETB) 52296 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 52297 v0.AuxInt = 0 52298 v0.AddArg(x) 52299 v.AddArg(v0) 52300 return true 52301 } 52302 // match: (SETNE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) 52303 // cond: z1==z2 && !config.nacl 52304 // result: (SETB (BTLconst [0] x)) 52305 for { 52306 v_0 := v.Args[0] 52307 if v_0.Op != OpAMD64TESTL { 52308 break 52309 } 52310 _ = v_0.Args[1] 52311 z2 := v_0.Args[0] 52312 z1 := v_0.Args[1] 52313 if z1.Op != OpAMD64SHRLconst { 52314 break 52315 } 52316 if z1.AuxInt != 31 { 52317 break 52318 } 52319 z1_0 := z1.Args[0] 52320 if z1_0.Op != OpAMD64SHLLconst { 52321 break 52322 } 52323 if z1_0.AuxInt != 31 { 52324 break 52325 } 52326 x := z1_0.Args[0] 52327 if !(z1 == z2 && !config.nacl) { 52328 break 52329 } 52330 v.reset(OpAMD64SETB) 52331 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 52332 v0.AuxInt = 0 52333 v0.AddArg(x) 52334 v.AddArg(v0) 52335 return true 52336 } 52337 // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2)) 52338 // cond: z1==z2 && !config.nacl 52339 // result: (SETB (BTQconst [63] x)) 52340 for { 52341 v_0 := v.Args[0] 52342 if v_0.Op != OpAMD64TESTQ { 52343 break 52344 } 52345 _ = v_0.Args[1] 52346 z1 := 
v_0.Args[0] 52347 if z1.Op != OpAMD64SHRQconst { 52348 break 52349 } 52350 if z1.AuxInt != 63 { 52351 break 52352 } 52353 x := z1.Args[0] 52354 z2 := v_0.Args[1] 52355 if !(z1 == z2 && !config.nacl) { 52356 break 52357 } 52358 v.reset(OpAMD64SETB) 52359 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 52360 v0.AuxInt = 63 52361 v0.AddArg(x) 52362 v.AddArg(v0) 52363 return true 52364 } 52365 // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] x))) 52366 // cond: z1==z2 && !config.nacl 52367 // result: (SETB (BTQconst [63] x)) 52368 for { 52369 v_0 := v.Args[0] 52370 if v_0.Op != OpAMD64TESTQ { 52371 break 52372 } 52373 _ = v_0.Args[1] 52374 z2 := v_0.Args[0] 52375 z1 := v_0.Args[1] 52376 if z1.Op != OpAMD64SHRQconst { 52377 break 52378 } 52379 if z1.AuxInt != 63 { 52380 break 52381 } 52382 x := z1.Args[0] 52383 if !(z1 == z2 && !config.nacl) { 52384 break 52385 } 52386 v.reset(OpAMD64SETB) 52387 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 52388 v0.AuxInt = 63 52389 v0.AddArg(x) 52390 v.AddArg(v0) 52391 return true 52392 } 52393 return false 52394 } 52395 func rewriteValueAMD64_OpAMD64SETNE_20(v *Value) bool { 52396 b := v.Block 52397 _ = b 52398 config := b.Func.Config 52399 _ = config 52400 // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2)) 52401 // cond: z1==z2 && !config.nacl 52402 // result: (SETB (BTLconst [31] x)) 52403 for { 52404 v_0 := v.Args[0] 52405 if v_0.Op != OpAMD64TESTL { 52406 break 52407 } 52408 _ = v_0.Args[1] 52409 z1 := v_0.Args[0] 52410 if z1.Op != OpAMD64SHRLconst { 52411 break 52412 } 52413 if z1.AuxInt != 31 { 52414 break 52415 } 52416 x := z1.Args[0] 52417 z2 := v_0.Args[1] 52418 if !(z1 == z2 && !config.nacl) { 52419 break 52420 } 52421 v.reset(OpAMD64SETB) 52422 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 52423 v0.AuxInt = 31 52424 v0.AddArg(x) 52425 v.AddArg(v0) 52426 return true 52427 } 52428 // match: (SETNE (TESTL z2 z1:(SHRLconst [31] x))) 52429 // cond: z1==z2 && !config.nacl 52430 // result: (SETB (BTLconst [31] x)) 52431 for { 52432 v_0 := v.Args[0] 52433 if v_0.Op != OpAMD64TESTL { 52434 break 52435 } 52436 _ = v_0.Args[1] 52437 z2 := v_0.Args[0] 52438 z1 := v_0.Args[1] 52439 if z1.Op != OpAMD64SHRLconst { 52440 break 52441 } 52442 if z1.AuxInt != 31 { 52443 break 52444 } 52445 x := z1.Args[0] 52446 if !(z1 == z2 && !config.nacl) { 52447 break 52448 } 52449 v.reset(OpAMD64SETB) 52450 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 52451 v0.AuxInt = 31 52452 v0.AddArg(x) 52453 v.AddArg(v0) 52454 return true 52455 } 52456 // match: (SETNE (InvertFlags x)) 52457 // cond: 52458 // result: (SETNE x) 52459 for { 52460 v_0 := v.Args[0] 52461 if v_0.Op != OpAMD64InvertFlags { 52462 break 52463 } 52464 x := v_0.Args[0] 52465 v.reset(OpAMD64SETNE) 52466 v.AddArg(x) 52467 return true 52468 } 52469 // match: (SETNE (FlagEQ)) 52470 // cond: 52471 // result: (MOVLconst [0]) 52472 for { 52473 v_0 := v.Args[0] 52474 if v_0.Op != OpAMD64FlagEQ { 52475 break 52476 } 52477 v.reset(OpAMD64MOVLconst) 52478 v.AuxInt = 0 52479 return true 52480 } 52481 // match: (SETNE (FlagLT_ULT)) 52482 // cond: 52483 // result: (MOVLconst [1]) 52484 for { 52485 v_0 := v.Args[0] 52486 if v_0.Op != OpAMD64FlagLT_ULT { 52487 break 52488 } 52489 v.reset(OpAMD64MOVLconst) 52490 v.AuxInt = 1 52491 return true 52492 } 52493 // match: (SETNE (FlagLT_UGT)) 52494 // cond: 52495 // result: (MOVLconst [1]) 52496 for { 52497 v_0 := v.Args[0] 52498 if v_0.Op != OpAMD64FlagLT_UGT { 52499 break 52500 } 52501 v.reset(OpAMD64MOVLconst) 52502 v.AuxInt = 1 52503 
		return true
	}
	// match: (SETNE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
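// Note (editorial): SETNEstore fuses a SETNE with the byte store of its
// result. The rules in the function below mirror the plain SETNE rules
// above: a TEST against a single-bit mask built as 1<<x becomes a BT
// instruction, so the byte is produced by SETBstore reading CF instead of ZF.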
func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(c) && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isUint32PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// cond:
	// result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// cond:
	// result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		z2 := v_1.Args[1]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		mem := v.Args[2]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETNEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
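// Note (editorial): the shift rules below rely on the x86 behavior that
// 32-bit shifts use only the low 5 bits of the count and 64-bit shifts the
// low 6. A constant count is therefore masked with &31 (or &63), and an
// ADD or AND applied to a variable count is dropped whenever it provably
// cannot change those low bits. For example, y << (s&31) reaches SSA as
// (SHLL y (ANDLconst [31] s)), and the ANDLconst rule removes the mask.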
func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLL x (MOVQconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
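// Note (editorial): (SHLLconst [1] (SHRLconst [1] x)) computes x with bit 0
// cleared, which BTRLconst [0] does in a single instruction; the rule is
// disabled under NaCl. The analogous SHLQconst/SHRQconst rule appears below.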
func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SHLLconst [1] (SHRLconst [1] x))
	// cond: !config.nacl
	// result: (BTRLconst [0] x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
	// match: (SHLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLQ x (MOVQconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SHLQconst [1] (SHRQconst [1] x))
	// cond: !config.nacl
	// result: (BTRQconst [0] x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
	// match: (SHLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
	// match: (SHRBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRL x (MOVQconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SHRLconst [1] (SHLLconst [1] x))
	// cond: !config.nacl
	// result: (BTRLconst [31] x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = 31
		v.AddArg(x)
		return true
	}
	// match: (SHRLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
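// Note (editorial): SHRQ repeats the SHLQ count-normalization rules for the
// logical right shift; only the low 6 bits of a 64-bit shift count matter.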
func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRQ x (MOVQconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SHRQconst [1] (SHLQconst [1] x))
	// cond: !config.nacl
	// result: (BTRQconst [63] x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
	// match: (SHRWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBL x (MOVLconst [c]))
	// cond:
	// result: (SUBLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// cond:
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
	// match: (SUBLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// cond:
	// result: (ADDLconst [int64(int32(-c))] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(-c))
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (SUBLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
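	// Note (editorial): as in the other *load rewrites, the LEAQ rule below
	// folds a symbolic address computation into the load's offset, provided
	// the combined offset still fits in 32 bits and the symbols can merge.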
	// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLmodify_0(v *Value) bool {
	// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SUBLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [c]))
	for {
55136 _ = v.Args[1] 55137 v_0 := v.Args[0] 55138 if v_0.Op != OpAMD64MOVQconst { 55139 break 55140 } 55141 c := v_0.AuxInt 55142 x := v.Args[1] 55143 if !(is32Bit(c)) { 55144 break 55145 } 55146 v.reset(OpAMD64NEGQ) 55147 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 55148 v0.AuxInt = c 55149 v0.AddArg(x) 55150 v.AddArg(v0) 55151 return true 55152 } 55153 // match: (SUBQ x x) 55154 // cond: 55155 // result: (MOVQconst [0]) 55156 for { 55157 _ = v.Args[1] 55158 x := v.Args[0] 55159 if x != v.Args[1] { 55160 break 55161 } 55162 v.reset(OpAMD64MOVQconst) 55163 v.AuxInt = 0 55164 return true 55165 } 55166 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) 55167 // cond: canMergeLoad(v, l, x) && clobber(l) 55168 // result: (SUBQload x [off] {sym} ptr mem) 55169 for { 55170 _ = v.Args[1] 55171 x := v.Args[0] 55172 l := v.Args[1] 55173 if l.Op != OpAMD64MOVQload { 55174 break 55175 } 55176 off := l.AuxInt 55177 sym := l.Aux 55178 _ = l.Args[1] 55179 ptr := l.Args[0] 55180 mem := l.Args[1] 55181 if !(canMergeLoad(v, l, x) && clobber(l)) { 55182 break 55183 } 55184 v.reset(OpAMD64SUBQload) 55185 v.AuxInt = off 55186 v.Aux = sym 55187 v.AddArg(x) 55188 v.AddArg(ptr) 55189 v.AddArg(mem) 55190 return true 55191 } 55192 return false 55193 } 55194 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { 55195 // match: (SUBQconst [0] x) 55196 // cond: 55197 // result: x 55198 for { 55199 if v.AuxInt != 0 { 55200 break 55201 } 55202 x := v.Args[0] 55203 v.reset(OpCopy) 55204 v.Type = x.Type 55205 v.AddArg(x) 55206 return true 55207 } 55208 // match: (SUBQconst [c] x) 55209 // cond: c != -(1<<31) 55210 // result: (ADDQconst [-c] x) 55211 for { 55212 c := v.AuxInt 55213 x := v.Args[0] 55214 if !(c != -(1 << 31)) { 55215 break 55216 } 55217 v.reset(OpAMD64ADDQconst) 55218 v.AuxInt = -c 55219 v.AddArg(x) 55220 return true 55221 } 55222 // match: (SUBQconst (MOVQconst [d]) [c]) 55223 // cond: 55224 // result: (MOVQconst [d-c]) 55225 for { 55226 c := v.AuxInt 55227 v_0 := v.Args[0] 55228 if v_0.Op != OpAMD64MOVQconst { 55229 break 55230 } 55231 d := v_0.AuxInt 55232 v.reset(OpAMD64MOVQconst) 55233 v.AuxInt = d - c 55234 return true 55235 } 55236 // match: (SUBQconst (SUBQconst x [d]) [c]) 55237 // cond: is32Bit(-c-d) 55238 // result: (ADDQconst [-c-d] x) 55239 for { 55240 c := v.AuxInt 55241 v_0 := v.Args[0] 55242 if v_0.Op != OpAMD64SUBQconst { 55243 break 55244 } 55245 d := v_0.AuxInt 55246 x := v_0.Args[0] 55247 if !(is32Bit(-c - d)) { 55248 break 55249 } 55250 v.reset(OpAMD64ADDQconst) 55251 v.AuxInt = -c - d 55252 v.AddArg(x) 55253 return true 55254 } 55255 return false 55256 } 55257 func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool { 55258 b := v.Block 55259 _ = b 55260 typ := &b.Func.Config.Types 55261 _ = typ 55262 // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem) 55263 // cond: is32Bit(off1+off2) 55264 // result: (SUBQload [off1+off2] {sym} val base mem) 55265 for { 55266 off1 := v.AuxInt 55267 sym := v.Aux 55268 _ = v.Args[2] 55269 val := v.Args[0] 55270 v_1 := v.Args[1] 55271 if v_1.Op != OpAMD64ADDQconst { 55272 break 55273 } 55274 off2 := v_1.AuxInt 55275 base := v_1.Args[0] 55276 mem := v.Args[2] 55277 if !(is32Bit(off1 + off2)) { 55278 break 55279 } 55280 v.reset(OpAMD64SUBQload) 55281 v.AuxInt = off1 + off2 55282 v.Aux = sym 55283 v.AddArg(val) 55284 v.AddArg(base) 55285 v.AddArg(mem) 55286 return true 55287 } 55288 // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 55289 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 55290 // result: 
(SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 55291 for { 55292 off1 := v.AuxInt 55293 sym1 := v.Aux 55294 _ = v.Args[2] 55295 val := v.Args[0] 55296 v_1 := v.Args[1] 55297 if v_1.Op != OpAMD64LEAQ { 55298 break 55299 } 55300 off2 := v_1.AuxInt 55301 sym2 := v_1.Aux 55302 base := v_1.Args[0] 55303 mem := v.Args[2] 55304 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 55305 break 55306 } 55307 v.reset(OpAMD64SUBQload) 55308 v.AuxInt = off1 + off2 55309 v.Aux = mergeSym(sym1, sym2) 55310 v.AddArg(val) 55311 v.AddArg(base) 55312 v.AddArg(mem) 55313 return true 55314 } 55315 // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 55316 // cond: 55317 // result: (SUBQ x (MOVQf2i y)) 55318 for { 55319 off := v.AuxInt 55320 sym := v.Aux 55321 _ = v.Args[2] 55322 x := v.Args[0] 55323 ptr := v.Args[1] 55324 v_2 := v.Args[2] 55325 if v_2.Op != OpAMD64MOVSDstore { 55326 break 55327 } 55328 if v_2.AuxInt != off { 55329 break 55330 } 55331 if v_2.Aux != sym { 55332 break 55333 } 55334 _ = v_2.Args[2] 55335 if ptr != v_2.Args[0] { 55336 break 55337 } 55338 y := v_2.Args[1] 55339 v.reset(OpAMD64SUBQ) 55340 v.AddArg(x) 55341 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 55342 v0.AddArg(y) 55343 v.AddArg(v0) 55344 return true 55345 } 55346 return false 55347 } 55348 func rewriteValueAMD64_OpAMD64SUBQmodify_0(v *Value) bool { 55349 // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 55350 // cond: is32Bit(off1+off2) 55351 // result: (SUBQmodify [off1+off2] {sym} base val mem) 55352 for { 55353 off1 := v.AuxInt 55354 sym := v.Aux 55355 _ = v.Args[2] 55356 v_0 := v.Args[0] 55357 if v_0.Op != OpAMD64ADDQconst { 55358 break 55359 } 55360 off2 := v_0.AuxInt 55361 base := v_0.Args[0] 55362 val := v.Args[1] 55363 mem := v.Args[2] 55364 if !(is32Bit(off1 + off2)) { 55365 break 55366 } 55367 v.reset(OpAMD64SUBQmodify) 55368 v.AuxInt = off1 + off2 55369 v.Aux = sym 55370 v.AddArg(base) 55371 v.AddArg(val) 55372 v.AddArg(mem) 55373 return true 55374 } 55375 // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 55376 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 55377 // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 55378 for { 55379 off1 := v.AuxInt 55380 sym1 := v.Aux 55381 _ = v.Args[2] 55382 v_0 := v.Args[0] 55383 if v_0.Op != OpAMD64LEAQ { 55384 break 55385 } 55386 off2 := v_0.AuxInt 55387 sym2 := v_0.Aux 55388 base := v_0.Args[0] 55389 val := v.Args[1] 55390 mem := v.Args[2] 55391 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 55392 break 55393 } 55394 v.reset(OpAMD64SUBQmodify) 55395 v.AuxInt = off1 + off2 55396 v.Aux = mergeSym(sym1, sym2) 55397 v.AddArg(base) 55398 v.AddArg(val) 55399 v.AddArg(mem) 55400 return true 55401 } 55402 return false 55403 } 55404 func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { 55405 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) 55406 // cond: canMergeLoad(v, l, x) && clobber(l) 55407 // result: (SUBSDload x [off] {sym} ptr mem) 55408 for { 55409 _ = v.Args[1] 55410 x := v.Args[0] 55411 l := v.Args[1] 55412 if l.Op != OpAMD64MOVSDload { 55413 break 55414 } 55415 off := l.AuxInt 55416 sym := l.Aux 55417 _ = l.Args[1] 55418 ptr := l.Args[0] 55419 mem := l.Args[1] 55420 if !(canMergeLoad(v, l, x) && clobber(l)) { 55421 break 55422 } 55423 v.reset(OpAMD64SUBSDload) 55424 v.AuxInt = off 55425 v.Aux = sym 55426 v.AddArg(x) 55427 v.AddArg(ptr) 55428 v.AddArg(mem) 55429 return true 55430 } 55431 return false 55432 } 55433 func rewriteValueAMD64_OpAMD64SUBSDload_0(v 
*Value) bool { 55434 b := v.Block 55435 _ = b 55436 typ := &b.Func.Config.Types 55437 _ = typ 55438 // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem) 55439 // cond: is32Bit(off1+off2) 55440 // result: (SUBSDload [off1+off2] {sym} val base mem) 55441 for { 55442 off1 := v.AuxInt 55443 sym := v.Aux 55444 _ = v.Args[2] 55445 val := v.Args[0] 55446 v_1 := v.Args[1] 55447 if v_1.Op != OpAMD64ADDQconst { 55448 break 55449 } 55450 off2 := v_1.AuxInt 55451 base := v_1.Args[0] 55452 mem := v.Args[2] 55453 if !(is32Bit(off1 + off2)) { 55454 break 55455 } 55456 v.reset(OpAMD64SUBSDload) 55457 v.AuxInt = off1 + off2 55458 v.Aux = sym 55459 v.AddArg(val) 55460 v.AddArg(base) 55461 v.AddArg(mem) 55462 return true 55463 } 55464 // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 55465 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 55466 // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 55467 for { 55468 off1 := v.AuxInt 55469 sym1 := v.Aux 55470 _ = v.Args[2] 55471 val := v.Args[0] 55472 v_1 := v.Args[1] 55473 if v_1.Op != OpAMD64LEAQ { 55474 break 55475 } 55476 off2 := v_1.AuxInt 55477 sym2 := v_1.Aux 55478 base := v_1.Args[0] 55479 mem := v.Args[2] 55480 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 55481 break 55482 } 55483 v.reset(OpAMD64SUBSDload) 55484 v.AuxInt = off1 + off2 55485 v.Aux = mergeSym(sym1, sym2) 55486 v.AddArg(val) 55487 v.AddArg(base) 55488 v.AddArg(mem) 55489 return true 55490 } 55491 // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 55492 // cond: 55493 // result: (SUBSD x (MOVQi2f y)) 55494 for { 55495 off := v.AuxInt 55496 sym := v.Aux 55497 _ = v.Args[2] 55498 x := v.Args[0] 55499 ptr := v.Args[1] 55500 v_2 := v.Args[2] 55501 if v_2.Op != OpAMD64MOVQstore { 55502 break 55503 } 55504 if v_2.AuxInt != off { 55505 break 55506 } 55507 if v_2.Aux != sym { 55508 break 55509 } 55510 _ = v_2.Args[2] 55511 if ptr != v_2.Args[0] { 55512 break 55513 } 55514 y := v_2.Args[1] 55515 v.reset(OpAMD64SUBSD) 55516 v.AddArg(x) 55517 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 55518 v0.AddArg(y) 55519 v.AddArg(v0) 55520 return true 55521 } 55522 return false 55523 } 55524 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { 55525 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) 55526 // cond: canMergeLoad(v, l, x) && clobber(l) 55527 // result: (SUBSSload x [off] {sym} ptr mem) 55528 for { 55529 _ = v.Args[1] 55530 x := v.Args[0] 55531 l := v.Args[1] 55532 if l.Op != OpAMD64MOVSSload { 55533 break 55534 } 55535 off := l.AuxInt 55536 sym := l.Aux 55537 _ = l.Args[1] 55538 ptr := l.Args[0] 55539 mem := l.Args[1] 55540 if !(canMergeLoad(v, l, x) && clobber(l)) { 55541 break 55542 } 55543 v.reset(OpAMD64SUBSSload) 55544 v.AuxInt = off 55545 v.Aux = sym 55546 v.AddArg(x) 55547 v.AddArg(ptr) 55548 v.AddArg(mem) 55549 return true 55550 } 55551 return false 55552 } 55553 func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool { 55554 b := v.Block 55555 _ = b 55556 typ := &b.Func.Config.Types 55557 _ = typ 55558 // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem) 55559 // cond: is32Bit(off1+off2) 55560 // result: (SUBSSload [off1+off2] {sym} val base mem) 55561 for { 55562 off1 := v.AuxInt 55563 sym := v.Aux 55564 _ = v.Args[2] 55565 val := v.Args[0] 55566 v_1 := v.Args[1] 55567 if v_1.Op != OpAMD64ADDQconst { 55568 break 55569 } 55570 off2 := v_1.AuxInt 55571 base := v_1.Args[0] 55572 mem := v.Args[2] 55573 if !(is32Bit(off1 + off2)) { 55574 break 55575 } 55576 
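// NOTE: a hand-written aside, not part of the generated rules: the reset
// below folds the ADDQconst into the load's displacement, e.g.
//   (SUBSSload [8] {sym} val (ADDQconst [16] base) mem)
//     => (SUBSSload [24] {sym} val base mem)
// This is only legal while the combined offset still fits in the signed
// 32-bit displacement of an AMD64 addressing mode, hence the
// is32Bit(off1+off2) guard that was just checked.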
v.reset(OpAMD64SUBSSload) 55577 v.AuxInt = off1 + off2 55578 v.Aux = sym 55579 v.AddArg(val) 55580 v.AddArg(base) 55581 v.AddArg(mem) 55582 return true 55583 } 55584 // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 55585 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 55586 // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 55587 for { 55588 off1 := v.AuxInt 55589 sym1 := v.Aux 55590 _ = v.Args[2] 55591 val := v.Args[0] 55592 v_1 := v.Args[1] 55593 if v_1.Op != OpAMD64LEAQ { 55594 break 55595 } 55596 off2 := v_1.AuxInt 55597 sym2 := v_1.Aux 55598 base := v_1.Args[0] 55599 mem := v.Args[2] 55600 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 55601 break 55602 } 55603 v.reset(OpAMD64SUBSSload) 55604 v.AuxInt = off1 + off2 55605 v.Aux = mergeSym(sym1, sym2) 55606 v.AddArg(val) 55607 v.AddArg(base) 55608 v.AddArg(mem) 55609 return true 55610 } 55611 // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 55612 // cond: 55613 // result: (SUBSS x (MOVLi2f y)) 55614 for { 55615 off := v.AuxInt 55616 sym := v.Aux 55617 _ = v.Args[2] 55618 x := v.Args[0] 55619 ptr := v.Args[1] 55620 v_2 := v.Args[2] 55621 if v_2.Op != OpAMD64MOVLstore { 55622 break 55623 } 55624 if v_2.AuxInt != off { 55625 break 55626 } 55627 if v_2.Aux != sym { 55628 break 55629 } 55630 _ = v_2.Args[2] 55631 if ptr != v_2.Args[0] { 55632 break 55633 } 55634 y := v_2.Args[1] 55635 v.reset(OpAMD64SUBSS) 55636 v.AddArg(x) 55637 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 55638 v0.AddArg(y) 55639 v.AddArg(v0) 55640 return true 55641 } 55642 return false 55643 } 55644 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { 55645 b := v.Block 55646 _ = b 55647 // match: (TESTB (MOVLconst [c]) x) 55648 // cond: 55649 // result: (TESTBconst [c] x) 55650 for { 55651 _ = v.Args[1] 55652 v_0 := v.Args[0] 55653 if v_0.Op != OpAMD64MOVLconst { 55654 break 55655 } 55656 c := v_0.AuxInt 55657 x := v.Args[1] 55658 v.reset(OpAMD64TESTBconst) 55659 v.AuxInt = c 55660 v.AddArg(x) 55661 return true 55662 } 55663 // match: (TESTB x (MOVLconst [c])) 55664 // cond: 55665 // result: (TESTBconst [c] x) 55666 for { 55667 _ = v.Args[1] 55668 x := v.Args[0] 55669 v_1 := v.Args[1] 55670 if v_1.Op != OpAMD64MOVLconst { 55671 break 55672 } 55673 c := v_1.AuxInt 55674 v.reset(OpAMD64TESTBconst) 55675 v.AuxInt = c 55676 v.AddArg(x) 55677 return true 55678 } 55679 // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2) 55680 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 55681 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem) 55682 for { 55683 _ = v.Args[1] 55684 l := v.Args[0] 55685 if l.Op != OpAMD64MOVBload { 55686 break 55687 } 55688 off := l.AuxInt 55689 sym := l.Aux 55690 _ = l.Args[1] 55691 ptr := l.Args[0] 55692 mem := l.Args[1] 55693 l2 := v.Args[1] 55694 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 55695 break 55696 } 55697 b = l.Block 55698 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstload, types.TypeFlags) 55699 v.reset(OpCopy) 55700 v.AddArg(v0) 55701 v0.AuxInt = makeValAndOff(0, off) 55702 v0.Aux = sym 55703 v0.AddArg(ptr) 55704 v0.AddArg(mem) 55705 return true 55706 } 55707 // match: (TESTB l2 l:(MOVBload {sym} [off] ptr mem)) 55708 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 55709 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem) 55710 for { 55711 _ = v.Args[1] 55712 l2 := v.Args[0] 55713 l := v.Args[1] 55714 if l.Op != OpAMD64MOVBload { 55715 break 55716 
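// NOTE: a hand-written aside, not part of the generated rules: TESTB x x
// sets the flags according to x itself (x AND x == x), so when both operands
// of the TESTB are the same load, the test is replaced by a compare of the
// memory operand against zero. l.Uses == 2 accounts for the load occupying
// both operand slots, and the @l.Block result places the new CMPBconstload
// in the load's block rather than the test's.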
} 55717 off := l.AuxInt 55718 sym := l.Aux 55719 _ = l.Args[1] 55720 ptr := l.Args[0] 55721 mem := l.Args[1] 55722 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 55723 break 55724 } 55725 b = l.Block 55726 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstload, types.TypeFlags) 55727 v.reset(OpCopy) 55728 v.AddArg(v0) 55729 v0.AuxInt = makeValAndOff(0, off) 55730 v0.Aux = sym 55731 v0.AddArg(ptr) 55732 v0.AddArg(mem) 55733 return true 55734 } 55735 return false 55736 } 55737 func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool { 55738 // match: (TESTBconst [-1] x) 55739 // cond: x.Op != OpAMD64MOVLconst 55740 // result: (TESTB x x) 55741 for { 55742 if v.AuxInt != -1 { 55743 break 55744 } 55745 x := v.Args[0] 55746 if !(x.Op != OpAMD64MOVLconst) { 55747 break 55748 } 55749 v.reset(OpAMD64TESTB) 55750 v.AddArg(x) 55751 v.AddArg(x) 55752 return true 55753 } 55754 return false 55755 } 55756 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { 55757 b := v.Block 55758 _ = b 55759 // match: (TESTL (MOVLconst [c]) x) 55760 // cond: 55761 // result: (TESTLconst [c] x) 55762 for { 55763 _ = v.Args[1] 55764 v_0 := v.Args[0] 55765 if v_0.Op != OpAMD64MOVLconst { 55766 break 55767 } 55768 c := v_0.AuxInt 55769 x := v.Args[1] 55770 v.reset(OpAMD64TESTLconst) 55771 v.AuxInt = c 55772 v.AddArg(x) 55773 return true 55774 } 55775 // match: (TESTL x (MOVLconst [c])) 55776 // cond: 55777 // result: (TESTLconst [c] x) 55778 for { 55779 _ = v.Args[1] 55780 x := v.Args[0] 55781 v_1 := v.Args[1] 55782 if v_1.Op != OpAMD64MOVLconst { 55783 break 55784 } 55785 c := v_1.AuxInt 55786 v.reset(OpAMD64TESTLconst) 55787 v.AuxInt = c 55788 v.AddArg(x) 55789 return true 55790 } 55791 // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2) 55792 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 55793 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem) 55794 for { 55795 _ = v.Args[1] 55796 l := v.Args[0] 55797 if l.Op != OpAMD64MOVLload { 55798 break 55799 } 55800 off := l.AuxInt 55801 sym := l.Aux 55802 _ = l.Args[1] 55803 ptr := l.Args[0] 55804 mem := l.Args[1] 55805 l2 := v.Args[1] 55806 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 55807 break 55808 } 55809 b = l.Block 55810 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstload, types.TypeFlags) 55811 v.reset(OpCopy) 55812 v.AddArg(v0) 55813 v0.AuxInt = makeValAndOff(0, off) 55814 v0.Aux = sym 55815 v0.AddArg(ptr) 55816 v0.AddArg(mem) 55817 return true 55818 } 55819 // match: (TESTL l2 l:(MOVLload {sym} [off] ptr mem)) 55820 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 55821 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem) 55822 for { 55823 _ = v.Args[1] 55824 l2 := v.Args[0] 55825 l := v.Args[1] 55826 if l.Op != OpAMD64MOVLload { 55827 break 55828 } 55829 off := l.AuxInt 55830 sym := l.Aux 55831 _ = l.Args[1] 55832 ptr := l.Args[0] 55833 mem := l.Args[1] 55834 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 55835 break 55836 } 55837 b = l.Block 55838 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstload, types.TypeFlags) 55839 v.reset(OpCopy) 55840 v.AddArg(v0) 55841 v0.AuxInt = makeValAndOff(0, off) 55842 v0.Aux = sym 55843 v0.AddArg(ptr) 55844 v0.AddArg(mem) 55845 return true 55846 } 55847 return false 55848 } 55849 func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool { 55850 // match: (TESTLconst [-1] x) 55851 // cond: x.Op != OpAMD64MOVLconst 55852 // result: (TESTL x x) 55853 for { 55854 if v.AuxInt != -1 { 55855 
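// NOTE: a hand-written aside, not part of the generated rules: testing
// against -1 (all bits set) is the same as testing x against itself, so
// (TESTLconst [-1] x) is rewritten back to (TESTL x x). The
// x.Op != OpAMD64MOVLconst condition keeps this rule from ping-ponging with
// the (TESTL (MOVLconst [c]) x) => (TESTLconst [c] x) rule above.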
break 55856 } 55857 x := v.Args[0] 55858 if !(x.Op != OpAMD64MOVLconst) { 55859 break 55860 } 55861 v.reset(OpAMD64TESTL) 55862 v.AddArg(x) 55863 v.AddArg(x) 55864 return true 55865 } 55866 return false 55867 } 55868 func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { 55869 b := v.Block 55870 _ = b 55871 // match: (TESTQ (MOVQconst [c]) x) 55872 // cond: is32Bit(c) 55873 // result: (TESTQconst [c] x) 55874 for { 55875 _ = v.Args[1] 55876 v_0 := v.Args[0] 55877 if v_0.Op != OpAMD64MOVQconst { 55878 break 55879 } 55880 c := v_0.AuxInt 55881 x := v.Args[1] 55882 if !(is32Bit(c)) { 55883 break 55884 } 55885 v.reset(OpAMD64TESTQconst) 55886 v.AuxInt = c 55887 v.AddArg(x) 55888 return true 55889 } 55890 // match: (TESTQ x (MOVQconst [c])) 55891 // cond: is32Bit(c) 55892 // result: (TESTQconst [c] x) 55893 for { 55894 _ = v.Args[1] 55895 x := v.Args[0] 55896 v_1 := v.Args[1] 55897 if v_1.Op != OpAMD64MOVQconst { 55898 break 55899 } 55900 c := v_1.AuxInt 55901 if !(is32Bit(c)) { 55902 break 55903 } 55904 v.reset(OpAMD64TESTQconst) 55905 v.AuxInt = c 55906 v.AddArg(x) 55907 return true 55908 } 55909 // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2) 55910 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 55911 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem) 55912 for { 55913 _ = v.Args[1] 55914 l := v.Args[0] 55915 if l.Op != OpAMD64MOVQload { 55916 break 55917 } 55918 off := l.AuxInt 55919 sym := l.Aux 55920 _ = l.Args[1] 55921 ptr := l.Args[0] 55922 mem := l.Args[1] 55923 l2 := v.Args[1] 55924 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 55925 break 55926 } 55927 b = l.Block 55928 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstload, types.TypeFlags) 55929 v.reset(OpCopy) 55930 v.AddArg(v0) 55931 v0.AuxInt = makeValAndOff(0, off) 55932 v0.Aux = sym 55933 v0.AddArg(ptr) 55934 v0.AddArg(mem) 55935 return true 55936 } 55937 // match: (TESTQ l2 l:(MOVQload {sym} [off] ptr mem)) 55938 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 55939 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem) 55940 for { 55941 _ = v.Args[1] 55942 l2 := v.Args[0] 55943 l := v.Args[1] 55944 if l.Op != OpAMD64MOVQload { 55945 break 55946 } 55947 off := l.AuxInt 55948 sym := l.Aux 55949 _ = l.Args[1] 55950 ptr := l.Args[0] 55951 mem := l.Args[1] 55952 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 55953 break 55954 } 55955 b = l.Block 55956 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstload, types.TypeFlags) 55957 v.reset(OpCopy) 55958 v.AddArg(v0) 55959 v0.AuxInt = makeValAndOff(0, off) 55960 v0.Aux = sym 55961 v0.AddArg(ptr) 55962 v0.AddArg(mem) 55963 return true 55964 } 55965 return false 55966 } 55967 func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool { 55968 // match: (TESTQconst [-1] x) 55969 // cond: x.Op != OpAMD64MOVQconst 55970 // result: (TESTQ x x) 55971 for { 55972 if v.AuxInt != -1 { 55973 break 55974 } 55975 x := v.Args[0] 55976 if !(x.Op != OpAMD64MOVQconst) { 55977 break 55978 } 55979 v.reset(OpAMD64TESTQ) 55980 v.AddArg(x) 55981 v.AddArg(x) 55982 return true 55983 } 55984 return false 55985 } 55986 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { 55987 b := v.Block 55988 _ = b 55989 // match: (TESTW (MOVLconst [c]) x) 55990 // cond: 55991 // result: (TESTWconst [c] x) 55992 for { 55993 _ = v.Args[1] 55994 v_0 := v.Args[0] 55995 if v_0.Op != OpAMD64MOVLconst { 55996 break 55997 } 55998 c := v_0.AuxInt 55999 x := v.Args[1] 56000 v.reset(OpAMD64TESTWconst) 56001 
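// NOTE: a hand-written aside, not part of the generated rules: TEST is
// commutative, so the constant is matched in either operand position; both
// (TESTW (MOVLconst [c]) x) and (TESTW x (MOVLconst [c])) canonicalize to
// (TESTWconst [c] x), which lets the back end emit a test-with-immediate
// instead of first materializing c in a register.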
v.AuxInt = c 56002 v.AddArg(x) 56003 return true 56004 } 56005 // match: (TESTW x (MOVLconst [c])) 56006 // cond: 56007 // result: (TESTWconst [c] x) 56008 for { 56009 _ = v.Args[1] 56010 x := v.Args[0] 56011 v_1 := v.Args[1] 56012 if v_1.Op != OpAMD64MOVLconst { 56013 break 56014 } 56015 c := v_1.AuxInt 56016 v.reset(OpAMD64TESTWconst) 56017 v.AuxInt = c 56018 v.AddArg(x) 56019 return true 56020 } 56021 // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2) 56022 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 56023 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem) 56024 for { 56025 _ = v.Args[1] 56026 l := v.Args[0] 56027 if l.Op != OpAMD64MOVWload { 56028 break 56029 } 56030 off := l.AuxInt 56031 sym := l.Aux 56032 _ = l.Args[1] 56033 ptr := l.Args[0] 56034 mem := l.Args[1] 56035 l2 := v.Args[1] 56036 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 56037 break 56038 } 56039 b = l.Block 56040 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstload, types.TypeFlags) 56041 v.reset(OpCopy) 56042 v.AddArg(v0) 56043 v0.AuxInt = makeValAndOff(0, off) 56044 v0.Aux = sym 56045 v0.AddArg(ptr) 56046 v0.AddArg(mem) 56047 return true 56048 } 56049 // match: (TESTW l2 l:(MOVWload {sym} [off] ptr mem)) 56050 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 56051 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem) 56052 for { 56053 _ = v.Args[1] 56054 l2 := v.Args[0] 56055 l := v.Args[1] 56056 if l.Op != OpAMD64MOVWload { 56057 break 56058 } 56059 off := l.AuxInt 56060 sym := l.Aux 56061 _ = l.Args[1] 56062 ptr := l.Args[0] 56063 mem := l.Args[1] 56064 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 56065 break 56066 } 56067 b = l.Block 56068 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstload, types.TypeFlags) 56069 v.reset(OpCopy) 56070 v.AddArg(v0) 56071 v0.AuxInt = makeValAndOff(0, off) 56072 v0.Aux = sym 56073 v0.AddArg(ptr) 56074 v0.AddArg(mem) 56075 return true 56076 } 56077 return false 56078 } 56079 func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool { 56080 // match: (TESTWconst [-1] x) 56081 // cond: x.Op != OpAMD64MOVLconst 56082 // result: (TESTW x x) 56083 for { 56084 if v.AuxInt != -1 { 56085 break 56086 } 56087 x := v.Args[0] 56088 if !(x.Op != OpAMD64MOVLconst) { 56089 break 56090 } 56091 v.reset(OpAMD64TESTW) 56092 v.AddArg(x) 56093 v.AddArg(x) 56094 return true 56095 } 56096 return false 56097 } 56098 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { 56099 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 56100 // cond: is32Bit(off1+off2) 56101 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 56102 for { 56103 off1 := v.AuxInt 56104 sym := v.Aux 56105 _ = v.Args[2] 56106 val := v.Args[0] 56107 v_1 := v.Args[1] 56108 if v_1.Op != OpAMD64ADDQconst { 56109 break 56110 } 56111 off2 := v_1.AuxInt 56112 ptr := v_1.Args[0] 56113 mem := v.Args[2] 56114 if !(is32Bit(off1 + off2)) { 56115 break 56116 } 56117 v.reset(OpAMD64XADDLlock) 56118 v.AuxInt = off1 + off2 56119 v.Aux = sym 56120 v.AddArg(val) 56121 v.AddArg(ptr) 56122 v.AddArg(mem) 56123 return true 56124 } 56125 return false 56126 } 56127 func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { 56128 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 56129 // cond: is32Bit(off1+off2) 56130 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 56131 for { 56132 off1 := v.AuxInt 56133 sym := v.Aux 56134 _ = v.Args[2] 56135 val := v.Args[0] 56136 v_1 := v.Args[1] 56137 if 
v_1.Op != OpAMD64ADDQconst { 56138 break 56139 } 56140 off2 := v_1.AuxInt 56141 ptr := v_1.Args[0] 56142 mem := v.Args[2] 56143 if !(is32Bit(off1 + off2)) { 56144 break 56145 } 56146 v.reset(OpAMD64XADDQlock) 56147 v.AuxInt = off1 + off2 56148 v.Aux = sym 56149 v.AddArg(val) 56150 v.AddArg(ptr) 56151 v.AddArg(mem) 56152 return true 56153 } 56154 return false 56155 } 56156 func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { 56157 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 56158 // cond: is32Bit(off1+off2) 56159 // result: (XCHGL [off1+off2] {sym} val ptr mem) 56160 for { 56161 off1 := v.AuxInt 56162 sym := v.Aux 56163 _ = v.Args[2] 56164 val := v.Args[0] 56165 v_1 := v.Args[1] 56166 if v_1.Op != OpAMD64ADDQconst { 56167 break 56168 } 56169 off2 := v_1.AuxInt 56170 ptr := v_1.Args[0] 56171 mem := v.Args[2] 56172 if !(is32Bit(off1 + off2)) { 56173 break 56174 } 56175 v.reset(OpAMD64XCHGL) 56176 v.AuxInt = off1 + off2 56177 v.Aux = sym 56178 v.AddArg(val) 56179 v.AddArg(ptr) 56180 v.AddArg(mem) 56181 return true 56182 } 56183 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 56184 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 56185 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 56186 for { 56187 off1 := v.AuxInt 56188 sym1 := v.Aux 56189 _ = v.Args[2] 56190 val := v.Args[0] 56191 v_1 := v.Args[1] 56192 if v_1.Op != OpAMD64LEAQ { 56193 break 56194 } 56195 off2 := v_1.AuxInt 56196 sym2 := v_1.Aux 56197 ptr := v_1.Args[0] 56198 mem := v.Args[2] 56199 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 56200 break 56201 } 56202 v.reset(OpAMD64XCHGL) 56203 v.AuxInt = off1 + off2 56204 v.Aux = mergeSym(sym1, sym2) 56205 v.AddArg(val) 56206 v.AddArg(ptr) 56207 v.AddArg(mem) 56208 return true 56209 } 56210 return false 56211 } 56212 func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { 56213 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 56214 // cond: is32Bit(off1+off2) 56215 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 56216 for { 56217 off1 := v.AuxInt 56218 sym := v.Aux 56219 _ = v.Args[2] 56220 val := v.Args[0] 56221 v_1 := v.Args[1] 56222 if v_1.Op != OpAMD64ADDQconst { 56223 break 56224 } 56225 off2 := v_1.AuxInt 56226 ptr := v_1.Args[0] 56227 mem := v.Args[2] 56228 if !(is32Bit(off1 + off2)) { 56229 break 56230 } 56231 v.reset(OpAMD64XCHGQ) 56232 v.AuxInt = off1 + off2 56233 v.Aux = sym 56234 v.AddArg(val) 56235 v.AddArg(ptr) 56236 v.AddArg(mem) 56237 return true 56238 } 56239 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 56240 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 56241 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 56242 for { 56243 off1 := v.AuxInt 56244 sym1 := v.Aux 56245 _ = v.Args[2] 56246 val := v.Args[0] 56247 v_1 := v.Args[1] 56248 if v_1.Op != OpAMD64LEAQ { 56249 break 56250 } 56251 off2 := v_1.AuxInt 56252 sym2 := v_1.Aux 56253 ptr := v_1.Args[0] 56254 mem := v.Args[2] 56255 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 56256 break 56257 } 56258 v.reset(OpAMD64XCHGQ) 56259 v.AuxInt = off1 + off2 56260 v.Aux = mergeSym(sym1, sym2) 56261 v.AddArg(val) 56262 v.AddArg(ptr) 56263 v.AddArg(mem) 56264 return true 56265 } 56266 return false 56267 } 56268 func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { 56269 b := v.Block 56270 _ = b 56271 config := b.Func.Config 56272 _ = config 56273 // match: (XORL (SHLL (MOVLconst [1]) y) x) 56274 // cond: !config.nacl 56275 // 
result: (BTCL x y) 56276 for { 56277 _ = v.Args[1] 56278 v_0 := v.Args[0] 56279 if v_0.Op != OpAMD64SHLL { 56280 break 56281 } 56282 _ = v_0.Args[1] 56283 v_0_0 := v_0.Args[0] 56284 if v_0_0.Op != OpAMD64MOVLconst { 56285 break 56286 } 56287 if v_0_0.AuxInt != 1 { 56288 break 56289 } 56290 y := v_0.Args[1] 56291 x := v.Args[1] 56292 if !(!config.nacl) { 56293 break 56294 } 56295 v.reset(OpAMD64BTCL) 56296 v.AddArg(x) 56297 v.AddArg(y) 56298 return true 56299 } 56300 // match: (XORL x (SHLL (MOVLconst [1]) y)) 56301 // cond: !config.nacl 56302 // result: (BTCL x y) 56303 for { 56304 _ = v.Args[1] 56305 x := v.Args[0] 56306 v_1 := v.Args[1] 56307 if v_1.Op != OpAMD64SHLL { 56308 break 56309 } 56310 _ = v_1.Args[1] 56311 v_1_0 := v_1.Args[0] 56312 if v_1_0.Op != OpAMD64MOVLconst { 56313 break 56314 } 56315 if v_1_0.AuxInt != 1 { 56316 break 56317 } 56318 y := v_1.Args[1] 56319 if !(!config.nacl) { 56320 break 56321 } 56322 v.reset(OpAMD64BTCL) 56323 v.AddArg(x) 56324 v.AddArg(y) 56325 return true 56326 } 56327 // match: (XORL (MOVLconst [c]) x) 56328 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 56329 // result: (BTCLconst [log2uint32(c)] x) 56330 for { 56331 _ = v.Args[1] 56332 v_0 := v.Args[0] 56333 if v_0.Op != OpAMD64MOVLconst { 56334 break 56335 } 56336 c := v_0.AuxInt 56337 x := v.Args[1] 56338 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 56339 break 56340 } 56341 v.reset(OpAMD64BTCLconst) 56342 v.AuxInt = log2uint32(c) 56343 v.AddArg(x) 56344 return true 56345 } 56346 // match: (XORL x (MOVLconst [c])) 56347 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 56348 // result: (BTCLconst [log2uint32(c)] x) 56349 for { 56350 _ = v.Args[1] 56351 x := v.Args[0] 56352 v_1 := v.Args[1] 56353 if v_1.Op != OpAMD64MOVLconst { 56354 break 56355 } 56356 c := v_1.AuxInt 56357 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 56358 break 56359 } 56360 v.reset(OpAMD64BTCLconst) 56361 v.AuxInt = log2uint32(c) 56362 v.AddArg(x) 56363 return true 56364 } 56365 // match: (XORL x (MOVLconst [c])) 56366 // cond: 56367 // result: (XORLconst [c] x) 56368 for { 56369 _ = v.Args[1] 56370 x := v.Args[0] 56371 v_1 := v.Args[1] 56372 if v_1.Op != OpAMD64MOVLconst { 56373 break 56374 } 56375 c := v_1.AuxInt 56376 v.reset(OpAMD64XORLconst) 56377 v.AuxInt = c 56378 v.AddArg(x) 56379 return true 56380 } 56381 // match: (XORL (MOVLconst [c]) x) 56382 // cond: 56383 // result: (XORLconst [c] x) 56384 for { 56385 _ = v.Args[1] 56386 v_0 := v.Args[0] 56387 if v_0.Op != OpAMD64MOVLconst { 56388 break 56389 } 56390 c := v_0.AuxInt 56391 x := v.Args[1] 56392 v.reset(OpAMD64XORLconst) 56393 v.AuxInt = c 56394 v.AddArg(x) 56395 return true 56396 } 56397 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) 56398 // cond: d==32-c 56399 // result: (ROLLconst x [c]) 56400 for { 56401 _ = v.Args[1] 56402 v_0 := v.Args[0] 56403 if v_0.Op != OpAMD64SHLLconst { 56404 break 56405 } 56406 c := v_0.AuxInt 56407 x := v_0.Args[0] 56408 v_1 := v.Args[1] 56409 if v_1.Op != OpAMD64SHRLconst { 56410 break 56411 } 56412 d := v_1.AuxInt 56413 if x != v_1.Args[0] { 56414 break 56415 } 56416 if !(d == 32-c) { 56417 break 56418 } 56419 v.reset(OpAMD64ROLLconst) 56420 v.AuxInt = c 56421 v.AddArg(x) 56422 return true 56423 } 56424 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) 56425 // cond: d==32-c 56426 // result: (ROLLconst x [c]) 56427 for { 56428 _ = v.Args[1] 56429 v_0 := v.Args[0] 56430 if v_0.Op != OpAMD64SHRLconst { 56431 break 56432 } 56433 d := v_0.AuxInt 
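// NOTE: a hand-written aside, not part of the generated rules: this match
// and the one above recognize the shift idiom for a 32-bit rotate. When
// d == 32-c, x<<c ^ x>>d combines two disjoint bit ranges, so XOR acts like
// OR and the pair is exactly a rotate left by c; e.g. with c=8, d=24,
// (XORL (SHRLconst x [24]) (SHLLconst x [8])) => (ROLLconst x [8]).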
56434 x := v_0.Args[0] 56435 v_1 := v.Args[1] 56436 if v_1.Op != OpAMD64SHLLconst { 56437 break 56438 } 56439 c := v_1.AuxInt 56440 if x != v_1.Args[0] { 56441 break 56442 } 56443 if !(d == 32-c) { 56444 break 56445 } 56446 v.reset(OpAMD64ROLLconst) 56447 v.AuxInt = c 56448 v.AddArg(x) 56449 return true 56450 } 56451 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 56452 // cond: d==16-c && c < 16 && t.Size() == 2 56453 // result: (ROLWconst x [c]) 56454 for { 56455 t := v.Type 56456 _ = v.Args[1] 56457 v_0 := v.Args[0] 56458 if v_0.Op != OpAMD64SHLLconst { 56459 break 56460 } 56461 c := v_0.AuxInt 56462 x := v_0.Args[0] 56463 v_1 := v.Args[1] 56464 if v_1.Op != OpAMD64SHRWconst { 56465 break 56466 } 56467 d := v_1.AuxInt 56468 if x != v_1.Args[0] { 56469 break 56470 } 56471 if !(d == 16-c && c < 16 && t.Size() == 2) { 56472 break 56473 } 56474 v.reset(OpAMD64ROLWconst) 56475 v.AuxInt = c 56476 v.AddArg(x) 56477 return true 56478 } 56479 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 56480 // cond: d==16-c && c < 16 && t.Size() == 2 56481 // result: (ROLWconst x [c]) 56482 for { 56483 t := v.Type 56484 _ = v.Args[1] 56485 v_0 := v.Args[0] 56486 if v_0.Op != OpAMD64SHRWconst { 56487 break 56488 } 56489 d := v_0.AuxInt 56490 x := v_0.Args[0] 56491 v_1 := v.Args[1] 56492 if v_1.Op != OpAMD64SHLLconst { 56493 break 56494 } 56495 c := v_1.AuxInt 56496 if x != v_1.Args[0] { 56497 break 56498 } 56499 if !(d == 16-c && c < 16 && t.Size() == 2) { 56500 break 56501 } 56502 v.reset(OpAMD64ROLWconst) 56503 v.AuxInt = c 56504 v.AddArg(x) 56505 return true 56506 } 56507 return false 56508 } 56509 func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { 56510 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 56511 // cond: d==8-c && c < 8 && t.Size() == 1 56512 // result: (ROLBconst x [c]) 56513 for { 56514 t := v.Type 56515 _ = v.Args[1] 56516 v_0 := v.Args[0] 56517 if v_0.Op != OpAMD64SHLLconst { 56518 break 56519 } 56520 c := v_0.AuxInt 56521 x := v_0.Args[0] 56522 v_1 := v.Args[1] 56523 if v_1.Op != OpAMD64SHRBconst { 56524 break 56525 } 56526 d := v_1.AuxInt 56527 if x != v_1.Args[0] { 56528 break 56529 } 56530 if !(d == 8-c && c < 8 && t.Size() == 1) { 56531 break 56532 } 56533 v.reset(OpAMD64ROLBconst) 56534 v.AuxInt = c 56535 v.AddArg(x) 56536 return true 56537 } 56538 // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 56539 // cond: d==8-c && c < 8 && t.Size() == 1 56540 // result: (ROLBconst x [c]) 56541 for { 56542 t := v.Type 56543 _ = v.Args[1] 56544 v_0 := v.Args[0] 56545 if v_0.Op != OpAMD64SHRBconst { 56546 break 56547 } 56548 d := v_0.AuxInt 56549 x := v_0.Args[0] 56550 v_1 := v.Args[1] 56551 if v_1.Op != OpAMD64SHLLconst { 56552 break 56553 } 56554 c := v_1.AuxInt 56555 if x != v_1.Args[0] { 56556 break 56557 } 56558 if !(d == 8-c && c < 8 && t.Size() == 1) { 56559 break 56560 } 56561 v.reset(OpAMD64ROLBconst) 56562 v.AuxInt = c 56563 v.AddArg(x) 56564 return true 56565 } 56566 // match: (XORL x x) 56567 // cond: 56568 // result: (MOVLconst [0]) 56569 for { 56570 _ = v.Args[1] 56571 x := v.Args[0] 56572 if x != v.Args[1] { 56573 break 56574 } 56575 v.reset(OpAMD64MOVLconst) 56576 v.AuxInt = 0 56577 return true 56578 } 56579 // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) 56580 // cond: canMergeLoad(v, l, x) && clobber(l) 56581 // result: (XORLload x [off] {sym} ptr mem) 56582 for { 56583 _ = v.Args[1] 56584 x := v.Args[0] 56585 l := v.Args[1] 56586 if l.Op != OpAMD64MOVLload { 56587 break 56588 } 56589 off := l.AuxInt 56590 sym := l.Aux 56591 _ = 
l.Args[1] 56592 ptr := l.Args[0] 56593 mem := l.Args[1] 56594 if !(canMergeLoad(v, l, x) && clobber(l)) { 56595 break 56596 } 56597 v.reset(OpAMD64XORLload) 56598 v.AuxInt = off 56599 v.Aux = sym 56600 v.AddArg(x) 56601 v.AddArg(ptr) 56602 v.AddArg(mem) 56603 return true 56604 } 56605 // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) 56606 // cond: canMergeLoad(v, l, x) && clobber(l) 56607 // result: (XORLload x [off] {sym} ptr mem) 56608 for { 56609 _ = v.Args[1] 56610 l := v.Args[0] 56611 if l.Op != OpAMD64MOVLload { 56612 break 56613 } 56614 off := l.AuxInt 56615 sym := l.Aux 56616 _ = l.Args[1] 56617 ptr := l.Args[0] 56618 mem := l.Args[1] 56619 x := v.Args[1] 56620 if !(canMergeLoad(v, l, x) && clobber(l)) { 56621 break 56622 } 56623 v.reset(OpAMD64XORLload) 56624 v.AuxInt = off 56625 v.Aux = sym 56626 v.AddArg(x) 56627 v.AddArg(ptr) 56628 v.AddArg(mem) 56629 return true 56630 } 56631 return false 56632 } 56633 func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { 56634 b := v.Block 56635 _ = b 56636 config := b.Func.Config 56637 _ = config 56638 // match: (XORLconst [c] x) 56639 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 56640 // result: (BTCLconst [log2uint32(c)] x) 56641 for { 56642 c := v.AuxInt 56643 x := v.Args[0] 56644 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 56645 break 56646 } 56647 v.reset(OpAMD64BTCLconst) 56648 v.AuxInt = log2uint32(c) 56649 v.AddArg(x) 56650 return true 56651 } 56652 // match: (XORLconst [1] (SETNE x)) 56653 // cond: 56654 // result: (SETEQ x) 56655 for { 56656 if v.AuxInt != 1 { 56657 break 56658 } 56659 v_0 := v.Args[0] 56660 if v_0.Op != OpAMD64SETNE { 56661 break 56662 } 56663 x := v_0.Args[0] 56664 v.reset(OpAMD64SETEQ) 56665 v.AddArg(x) 56666 return true 56667 } 56668 // match: (XORLconst [1] (SETEQ x)) 56669 // cond: 56670 // result: (SETNE x) 56671 for { 56672 if v.AuxInt != 1 { 56673 break 56674 } 56675 v_0 := v.Args[0] 56676 if v_0.Op != OpAMD64SETEQ { 56677 break 56678 } 56679 x := v_0.Args[0] 56680 v.reset(OpAMD64SETNE) 56681 v.AddArg(x) 56682 return true 56683 } 56684 // match: (XORLconst [1] (SETL x)) 56685 // cond: 56686 // result: (SETGE x) 56687 for { 56688 if v.AuxInt != 1 { 56689 break 56690 } 56691 v_0 := v.Args[0] 56692 if v_0.Op != OpAMD64SETL { 56693 break 56694 } 56695 x := v_0.Args[0] 56696 v.reset(OpAMD64SETGE) 56697 v.AddArg(x) 56698 return true 56699 } 56700 // match: (XORLconst [1] (SETGE x)) 56701 // cond: 56702 // result: (SETL x) 56703 for { 56704 if v.AuxInt != 1 { 56705 break 56706 } 56707 v_0 := v.Args[0] 56708 if v_0.Op != OpAMD64SETGE { 56709 break 56710 } 56711 x := v_0.Args[0] 56712 v.reset(OpAMD64SETL) 56713 v.AddArg(x) 56714 return true 56715 } 56716 // match: (XORLconst [1] (SETLE x)) 56717 // cond: 56718 // result: (SETG x) 56719 for { 56720 if v.AuxInt != 1 { 56721 break 56722 } 56723 v_0 := v.Args[0] 56724 if v_0.Op != OpAMD64SETLE { 56725 break 56726 } 56727 x := v_0.Args[0] 56728 v.reset(OpAMD64SETG) 56729 v.AddArg(x) 56730 return true 56731 } 56732 // match: (XORLconst [1] (SETG x)) 56733 // cond: 56734 // result: (SETLE x) 56735 for { 56736 if v.AuxInt != 1 { 56737 break 56738 } 56739 v_0 := v.Args[0] 56740 if v_0.Op != OpAMD64SETG { 56741 break 56742 } 56743 x := v_0.Args[0] 56744 v.reset(OpAMD64SETLE) 56745 v.AddArg(x) 56746 return true 56747 } 56748 // match: (XORLconst [1] (SETB x)) 56749 // cond: 56750 // result: (SETAE x) 56751 for { 56752 if v.AuxInt != 1 { 56753 break 56754 } 56755 v_0 := v.Args[0] 56756 if v_0.Op != OpAMD64SETB { 56757 break 
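// NOTE: a hand-written aside, not part of the generated rules: the run of
// (XORLconst [1] (SETcc x)) rules here implements boolean negation of
// condition codes: XORing a SETcc result with 1 flips it, so each setcc is
// replaced by its inverse (SETNE/SETEQ, SETL/SETGE, SETLE/SETG, SETB/SETAE,
// SETBE/SETA), avoiding a separate XOR instruction.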
56758 } 56759 x := v_0.Args[0] 56760 v.reset(OpAMD64SETAE) 56761 v.AddArg(x) 56762 return true 56763 } 56764 // match: (XORLconst [1] (SETAE x)) 56765 // cond: 56766 // result: (SETB x) 56767 for { 56768 if v.AuxInt != 1 { 56769 break 56770 } 56771 v_0 := v.Args[0] 56772 if v_0.Op != OpAMD64SETAE { 56773 break 56774 } 56775 x := v_0.Args[0] 56776 v.reset(OpAMD64SETB) 56777 v.AddArg(x) 56778 return true 56779 } 56780 // match: (XORLconst [1] (SETBE x)) 56781 // cond: 56782 // result: (SETA x) 56783 for { 56784 if v.AuxInt != 1 { 56785 break 56786 } 56787 v_0 := v.Args[0] 56788 if v_0.Op != OpAMD64SETBE { 56789 break 56790 } 56791 x := v_0.Args[0] 56792 v.reset(OpAMD64SETA) 56793 v.AddArg(x) 56794 return true 56795 } 56796 return false 56797 } 56798 func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { 56799 // match: (XORLconst [1] (SETA x)) 56800 // cond: 56801 // result: (SETBE x) 56802 for { 56803 if v.AuxInt != 1 { 56804 break 56805 } 56806 v_0 := v.Args[0] 56807 if v_0.Op != OpAMD64SETA { 56808 break 56809 } 56810 x := v_0.Args[0] 56811 v.reset(OpAMD64SETBE) 56812 v.AddArg(x) 56813 return true 56814 } 56815 // match: (XORLconst [c] (XORLconst [d] x)) 56816 // cond: 56817 // result: (XORLconst [c ^ d] x) 56818 for { 56819 c := v.AuxInt 56820 v_0 := v.Args[0] 56821 if v_0.Op != OpAMD64XORLconst { 56822 break 56823 } 56824 d := v_0.AuxInt 56825 x := v_0.Args[0] 56826 v.reset(OpAMD64XORLconst) 56827 v.AuxInt = c ^ d 56828 v.AddArg(x) 56829 return true 56830 } 56831 // match: (XORLconst [c] (BTCLconst [d] x)) 56832 // cond: 56833 // result: (XORLconst [c ^ 1<<uint32(d)] x) 56834 for { 56835 c := v.AuxInt 56836 v_0 := v.Args[0] 56837 if v_0.Op != OpAMD64BTCLconst { 56838 break 56839 } 56840 d := v_0.AuxInt 56841 x := v_0.Args[0] 56842 v.reset(OpAMD64XORLconst) 56843 v.AuxInt = c ^ 1<<uint32(d) 56844 v.AddArg(x) 56845 return true 56846 } 56847 // match: (XORLconst [c] x) 56848 // cond: int32(c)==0 56849 // result: x 56850 for { 56851 c := v.AuxInt 56852 x := v.Args[0] 56853 if !(int32(c) == 0) { 56854 break 56855 } 56856 v.reset(OpCopy) 56857 v.Type = x.Type 56858 v.AddArg(x) 56859 return true 56860 } 56861 // match: (XORLconst [c] (MOVLconst [d])) 56862 // cond: 56863 // result: (MOVLconst [c^d]) 56864 for { 56865 c := v.AuxInt 56866 v_0 := v.Args[0] 56867 if v_0.Op != OpAMD64MOVLconst { 56868 break 56869 } 56870 d := v_0.AuxInt 56871 v.reset(OpAMD64MOVLconst) 56872 v.AuxInt = c ^ d 56873 return true 56874 } 56875 return false 56876 } 56877 func rewriteValueAMD64_OpAMD64XORLconstmodify_0(v *Value) bool { 56878 // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 56879 // cond: ValAndOff(valoff1).canAdd(off2) 56880 // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 56881 for { 56882 valoff1 := v.AuxInt 56883 sym := v.Aux 56884 _ = v.Args[1] 56885 v_0 := v.Args[0] 56886 if v_0.Op != OpAMD64ADDQconst { 56887 break 56888 } 56889 off2 := v_0.AuxInt 56890 base := v_0.Args[0] 56891 mem := v.Args[1] 56892 if !(ValAndOff(valoff1).canAdd(off2)) { 56893 break 56894 } 56895 v.reset(OpAMD64XORLconstmodify) 56896 v.AuxInt = ValAndOff(valoff1).add(off2) 56897 v.Aux = sym 56898 v.AddArg(base) 56899 v.AddArg(mem) 56900 return true 56901 } 56902 // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 56903 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 56904 // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 56905 for { 56906 valoff1 := v.AuxInt 56907 sym1 := v.Aux 56908 _ = 
v.Args[1] 56909 v_0 := v.Args[0] 56910 if v_0.Op != OpAMD64LEAQ { 56911 break 56912 } 56913 off2 := v_0.AuxInt 56914 sym2 := v_0.Aux 56915 base := v_0.Args[0] 56916 mem := v.Args[1] 56917 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 56918 break 56919 } 56920 v.reset(OpAMD64XORLconstmodify) 56921 v.AuxInt = ValAndOff(valoff1).add(off2) 56922 v.Aux = mergeSym(sym1, sym2) 56923 v.AddArg(base) 56924 v.AddArg(mem) 56925 return true 56926 } 56927 return false 56928 } 56929 func rewriteValueAMD64_OpAMD64XORLload_0(v *Value) bool { 56930 b := v.Block 56931 _ = b 56932 typ := &b.Func.Config.Types 56933 _ = typ 56934 // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem) 56935 // cond: is32Bit(off1+off2) 56936 // result: (XORLload [off1+off2] {sym} val base mem) 56937 for { 56938 off1 := v.AuxInt 56939 sym := v.Aux 56940 _ = v.Args[2] 56941 val := v.Args[0] 56942 v_1 := v.Args[1] 56943 if v_1.Op != OpAMD64ADDQconst { 56944 break 56945 } 56946 off2 := v_1.AuxInt 56947 base := v_1.Args[0] 56948 mem := v.Args[2] 56949 if !(is32Bit(off1 + off2)) { 56950 break 56951 } 56952 v.reset(OpAMD64XORLload) 56953 v.AuxInt = off1 + off2 56954 v.Aux = sym 56955 v.AddArg(val) 56956 v.AddArg(base) 56957 v.AddArg(mem) 56958 return true 56959 } 56960 // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 56961 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 56962 // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 56963 for { 56964 off1 := v.AuxInt 56965 sym1 := v.Aux 56966 _ = v.Args[2] 56967 val := v.Args[0] 56968 v_1 := v.Args[1] 56969 if v_1.Op != OpAMD64LEAQ { 56970 break 56971 } 56972 off2 := v_1.AuxInt 56973 sym2 := v_1.Aux 56974 base := v_1.Args[0] 56975 mem := v.Args[2] 56976 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 56977 break 56978 } 56979 v.reset(OpAMD64XORLload) 56980 v.AuxInt = off1 + off2 56981 v.Aux = mergeSym(sym1, sym2) 56982 v.AddArg(val) 56983 v.AddArg(base) 56984 v.AddArg(mem) 56985 return true 56986 } 56987 // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 56988 // cond: 56989 // result: (XORL x (MOVLf2i y)) 56990 for { 56991 off := v.AuxInt 56992 sym := v.Aux 56993 _ = v.Args[2] 56994 x := v.Args[0] 56995 ptr := v.Args[1] 56996 v_2 := v.Args[2] 56997 if v_2.Op != OpAMD64MOVSSstore { 56998 break 56999 } 57000 if v_2.AuxInt != off { 57001 break 57002 } 57003 if v_2.Aux != sym { 57004 break 57005 } 57006 _ = v_2.Args[2] 57007 if ptr != v_2.Args[0] { 57008 break 57009 } 57010 y := v_2.Args[1] 57011 v.reset(OpAMD64XORL) 57012 v.AddArg(x) 57013 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 57014 v0.AddArg(y) 57015 v.AddArg(v0) 57016 return true 57017 } 57018 return false 57019 } 57020 func rewriteValueAMD64_OpAMD64XORLmodify_0(v *Value) bool { 57021 // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem) 57022 // cond: is32Bit(off1+off2) 57023 // result: (XORLmodify [off1+off2] {sym} base val mem) 57024 for { 57025 off1 := v.AuxInt 57026 sym := v.Aux 57027 _ = v.Args[2] 57028 v_0 := v.Args[0] 57029 if v_0.Op != OpAMD64ADDQconst { 57030 break 57031 } 57032 off2 := v_0.AuxInt 57033 base := v_0.Args[0] 57034 val := v.Args[1] 57035 mem := v.Args[2] 57036 if !(is32Bit(off1 + off2)) { 57037 break 57038 } 57039 v.reset(OpAMD64XORLmodify) 57040 v.AuxInt = off1 + off2 57041 v.Aux = sym 57042 v.AddArg(base) 57043 v.AddArg(val) 57044 v.AddArg(mem) 57045 return true 57046 } 57047 // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 57048 // cond: is32Bit(off1+off2) && 
canMergeSym(sym1, sym2) 57049 // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 57050 for { 57051 off1 := v.AuxInt 57052 sym1 := v.Aux 57053 _ = v.Args[2] 57054 v_0 := v.Args[0] 57055 if v_0.Op != OpAMD64LEAQ { 57056 break 57057 } 57058 off2 := v_0.AuxInt 57059 sym2 := v_0.Aux 57060 base := v_0.Args[0] 57061 val := v.Args[1] 57062 mem := v.Args[2] 57063 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 57064 break 57065 } 57066 v.reset(OpAMD64XORLmodify) 57067 v.AuxInt = off1 + off2 57068 v.Aux = mergeSym(sym1, sym2) 57069 v.AddArg(base) 57070 v.AddArg(val) 57071 v.AddArg(mem) 57072 return true 57073 } 57074 return false 57075 } 57076 func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { 57077 b := v.Block 57078 _ = b 57079 config := b.Func.Config 57080 _ = config 57081 // match: (XORQ (SHLQ (MOVQconst [1]) y) x) 57082 // cond: !config.nacl 57083 // result: (BTCQ x y) 57084 for { 57085 _ = v.Args[1] 57086 v_0 := v.Args[0] 57087 if v_0.Op != OpAMD64SHLQ { 57088 break 57089 } 57090 _ = v_0.Args[1] 57091 v_0_0 := v_0.Args[0] 57092 if v_0_0.Op != OpAMD64MOVQconst { 57093 break 57094 } 57095 if v_0_0.AuxInt != 1 { 57096 break 57097 } 57098 y := v_0.Args[1] 57099 x := v.Args[1] 57100 if !(!config.nacl) { 57101 break 57102 } 57103 v.reset(OpAMD64BTCQ) 57104 v.AddArg(x) 57105 v.AddArg(y) 57106 return true 57107 } 57108 // match: (XORQ x (SHLQ (MOVQconst [1]) y)) 57109 // cond: !config.nacl 57110 // result: (BTCQ x y) 57111 for { 57112 _ = v.Args[1] 57113 x := v.Args[0] 57114 v_1 := v.Args[1] 57115 if v_1.Op != OpAMD64SHLQ { 57116 break 57117 } 57118 _ = v_1.Args[1] 57119 v_1_0 := v_1.Args[0] 57120 if v_1_0.Op != OpAMD64MOVQconst { 57121 break 57122 } 57123 if v_1_0.AuxInt != 1 { 57124 break 57125 } 57126 y := v_1.Args[1] 57127 if !(!config.nacl) { 57128 break 57129 } 57130 v.reset(OpAMD64BTCQ) 57131 v.AddArg(x) 57132 v.AddArg(y) 57133 return true 57134 } 57135 // match: (XORQ (MOVQconst [c]) x) 57136 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 57137 // result: (BTCQconst [log2(c)] x) 57138 for { 57139 _ = v.Args[1] 57140 v_0 := v.Args[0] 57141 if v_0.Op != OpAMD64MOVQconst { 57142 break 57143 } 57144 c := v_0.AuxInt 57145 x := v.Args[1] 57146 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 57147 break 57148 } 57149 v.reset(OpAMD64BTCQconst) 57150 v.AuxInt = log2(c) 57151 v.AddArg(x) 57152 return true 57153 } 57154 // match: (XORQ x (MOVQconst [c])) 57155 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 57156 // result: (BTCQconst [log2(c)] x) 57157 for { 57158 _ = v.Args[1] 57159 x := v.Args[0] 57160 v_1 := v.Args[1] 57161 if v_1.Op != OpAMD64MOVQconst { 57162 break 57163 } 57164 c := v_1.AuxInt 57165 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 57166 break 57167 } 57168 v.reset(OpAMD64BTCQconst) 57169 v.AuxInt = log2(c) 57170 v.AddArg(x) 57171 return true 57172 } 57173 // match: (XORQ x (MOVQconst [c])) 57174 // cond: is32Bit(c) 57175 // result: (XORQconst [c] x) 57176 for { 57177 _ = v.Args[1] 57178 x := v.Args[0] 57179 v_1 := v.Args[1] 57180 if v_1.Op != OpAMD64MOVQconst { 57181 break 57182 } 57183 c := v_1.AuxInt 57184 if !(is32Bit(c)) { 57185 break 57186 } 57187 v.reset(OpAMD64XORQconst) 57188 v.AuxInt = c 57189 v.AddArg(x) 57190 return true 57191 } 57192 // match: (XORQ (MOVQconst [c]) x) 57193 // cond: is32Bit(c) 57194 // result: (XORQconst [c] x) 57195 for { 57196 _ = v.Args[1] 57197 v_0 := v.Args[0] 57198 if v_0.Op != OpAMD64MOVQconst { 57199 break 57200 } 57201 c := v_0.AuxInt 57202 x 
:= v.Args[1] 57203 if !(is32Bit(c)) { 57204 break 57205 } 57206 v.reset(OpAMD64XORQconst) 57207 v.AuxInt = c 57208 v.AddArg(x) 57209 return true 57210 } 57211 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) 57212 // cond: d==64-c 57213 // result: (ROLQconst x [c]) 57214 for { 57215 _ = v.Args[1] 57216 v_0 := v.Args[0] 57217 if v_0.Op != OpAMD64SHLQconst { 57218 break 57219 } 57220 c := v_0.AuxInt 57221 x := v_0.Args[0] 57222 v_1 := v.Args[1] 57223 if v_1.Op != OpAMD64SHRQconst { 57224 break 57225 } 57226 d := v_1.AuxInt 57227 if x != v_1.Args[0] { 57228 break 57229 } 57230 if !(d == 64-c) { 57231 break 57232 } 57233 v.reset(OpAMD64ROLQconst) 57234 v.AuxInt = c 57235 v.AddArg(x) 57236 return true 57237 } 57238 // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) 57239 // cond: d==64-c 57240 // result: (ROLQconst x [c]) 57241 for { 57242 _ = v.Args[1] 57243 v_0 := v.Args[0] 57244 if v_0.Op != OpAMD64SHRQconst { 57245 break 57246 } 57247 d := v_0.AuxInt 57248 x := v_0.Args[0] 57249 v_1 := v.Args[1] 57250 if v_1.Op != OpAMD64SHLQconst { 57251 break 57252 } 57253 c := v_1.AuxInt 57254 if x != v_1.Args[0] { 57255 break 57256 } 57257 if !(d == 64-c) { 57258 break 57259 } 57260 v.reset(OpAMD64ROLQconst) 57261 v.AuxInt = c 57262 v.AddArg(x) 57263 return true 57264 } 57265 // match: (XORQ x x) 57266 // cond: 57267 // result: (MOVQconst [0]) 57268 for { 57269 _ = v.Args[1] 57270 x := v.Args[0] 57271 if x != v.Args[1] { 57272 break 57273 } 57274 v.reset(OpAMD64MOVQconst) 57275 v.AuxInt = 0 57276 return true 57277 } 57278 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) 57279 // cond: canMergeLoad(v, l, x) && clobber(l) 57280 // result: (XORQload x [off] {sym} ptr mem) 57281 for { 57282 _ = v.Args[1] 57283 x := v.Args[0] 57284 l := v.Args[1] 57285 if l.Op != OpAMD64MOVQload { 57286 break 57287 } 57288 off := l.AuxInt 57289 sym := l.Aux 57290 _ = l.Args[1] 57291 ptr := l.Args[0] 57292 mem := l.Args[1] 57293 if !(canMergeLoad(v, l, x) && clobber(l)) { 57294 break 57295 } 57296 v.reset(OpAMD64XORQload) 57297 v.AuxInt = off 57298 v.Aux = sym 57299 v.AddArg(x) 57300 v.AddArg(ptr) 57301 v.AddArg(mem) 57302 return true 57303 } 57304 return false 57305 } 57306 func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool { 57307 // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) 57308 // cond: canMergeLoad(v, l, x) && clobber(l) 57309 // result: (XORQload x [off] {sym} ptr mem) 57310 for { 57311 _ = v.Args[1] 57312 l := v.Args[0] 57313 if l.Op != OpAMD64MOVQload { 57314 break 57315 } 57316 off := l.AuxInt 57317 sym := l.Aux 57318 _ = l.Args[1] 57319 ptr := l.Args[0] 57320 mem := l.Args[1] 57321 x := v.Args[1] 57322 if !(canMergeLoad(v, l, x) && clobber(l)) { 57323 break 57324 } 57325 v.reset(OpAMD64XORQload) 57326 v.AuxInt = off 57327 v.Aux = sym 57328 v.AddArg(x) 57329 v.AddArg(ptr) 57330 v.AddArg(mem) 57331 return true 57332 } 57333 return false 57334 } 57335 func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { 57336 b := v.Block 57337 _ = b 57338 config := b.Func.Config 57339 _ = config 57340 // match: (XORQconst [c] x) 57341 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl 57342 // result: (BTCQconst [log2(c)] x) 57343 for { 57344 c := v.AuxInt 57345 x := v.Args[0] 57346 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) { 57347 break 57348 } 57349 v.reset(OpAMD64BTCQconst) 57350 v.AuxInt = log2(c) 57351 v.AddArg(x) 57352 return true 57353 } 57354 // match: (XORQconst [c] (XORQconst [d] x)) 57355 // cond: 57356 // result: (XORQconst [c ^ d] x) 57357 for { 57358 c := 
v.AuxInt 57359 v_0 := v.Args[0] 57360 if v_0.Op != OpAMD64XORQconst { 57361 break 57362 } 57363 d := v_0.AuxInt 57364 x := v_0.Args[0] 57365 v.reset(OpAMD64XORQconst) 57366 v.AuxInt = c ^ d 57367 v.AddArg(x) 57368 return true 57369 } 57370 // match: (XORQconst [c] (BTCQconst [d] x)) 57371 // cond: 57372 // result: (XORQconst [c ^ 1<<uint32(d)] x) 57373 for { 57374 c := v.AuxInt 57375 v_0 := v.Args[0] 57376 if v_0.Op != OpAMD64BTCQconst { 57377 break 57378 } 57379 d := v_0.AuxInt 57380 x := v_0.Args[0] 57381 v.reset(OpAMD64XORQconst) 57382 v.AuxInt = c ^ 1<<uint32(d) 57383 v.AddArg(x) 57384 return true 57385 } 57386 // match: (XORQconst [0] x) 57387 // cond: 57388 // result: x 57389 for { 57390 if v.AuxInt != 0 { 57391 break 57392 } 57393 x := v.Args[0] 57394 v.reset(OpCopy) 57395 v.Type = x.Type 57396 v.AddArg(x) 57397 return true 57398 } 57399 // match: (XORQconst [c] (MOVQconst [d])) 57400 // cond: 57401 // result: (MOVQconst [c^d]) 57402 for { 57403 c := v.AuxInt 57404 v_0 := v.Args[0] 57405 if v_0.Op != OpAMD64MOVQconst { 57406 break 57407 } 57408 d := v_0.AuxInt 57409 v.reset(OpAMD64MOVQconst) 57410 v.AuxInt = c ^ d 57411 return true 57412 } 57413 return false 57414 } 57415 func rewriteValueAMD64_OpAMD64XORQconstmodify_0(v *Value) bool { 57416 // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 57417 // cond: ValAndOff(valoff1).canAdd(off2) 57418 // result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) 57419 for { 57420 valoff1 := v.AuxInt 57421 sym := v.Aux 57422 _ = v.Args[1] 57423 v_0 := v.Args[0] 57424 if v_0.Op != OpAMD64ADDQconst { 57425 break 57426 } 57427 off2 := v_0.AuxInt 57428 base := v_0.Args[0] 57429 mem := v.Args[1] 57430 if !(ValAndOff(valoff1).canAdd(off2)) { 57431 break 57432 } 57433 v.reset(OpAMD64XORQconstmodify) 57434 v.AuxInt = ValAndOff(valoff1).add(off2) 57435 v.Aux = sym 57436 v.AddArg(base) 57437 v.AddArg(mem) 57438 return true 57439 } 57440 // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 57441 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) 57442 // result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) 57443 for { 57444 valoff1 := v.AuxInt 57445 sym1 := v.Aux 57446 _ = v.Args[1] 57447 v_0 := v.Args[0] 57448 if v_0.Op != OpAMD64LEAQ { 57449 break 57450 } 57451 off2 := v_0.AuxInt 57452 sym2 := v_0.Aux 57453 base := v_0.Args[0] 57454 mem := v.Args[1] 57455 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { 57456 break 57457 } 57458 v.reset(OpAMD64XORQconstmodify) 57459 v.AuxInt = ValAndOff(valoff1).add(off2) 57460 v.Aux = mergeSym(sym1, sym2) 57461 v.AddArg(base) 57462 v.AddArg(mem) 57463 return true 57464 } 57465 return false 57466 } 57467 func rewriteValueAMD64_OpAMD64XORQload_0(v *Value) bool { 57468 b := v.Block 57469 _ = b 57470 typ := &b.Func.Config.Types 57471 _ = typ 57472 // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem) 57473 // cond: is32Bit(off1+off2) 57474 // result: (XORQload [off1+off2] {sym} val base mem) 57475 for { 57476 off1 := v.AuxInt 57477 sym := v.Aux 57478 _ = v.Args[2] 57479 val := v.Args[0] 57480 v_1 := v.Args[1] 57481 if v_1.Op != OpAMD64ADDQconst { 57482 break 57483 } 57484 off2 := v_1.AuxInt 57485 base := v_1.Args[0] 57486 mem := v.Args[2] 57487 if !(is32Bit(off1 + off2)) { 57488 break 57489 } 57490 v.reset(OpAMD64XORQload) 57491 v.AuxInt = off1 + off2 57492 v.Aux = sym 57493 v.AddArg(val) 57494 v.AddArg(base) 57495 v.AddArg(mem) 57496 return true 57497 } 57498 // match: (XORQload 
[off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 57499 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 57500 // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 57501 for { 57502 off1 := v.AuxInt 57503 sym1 := v.Aux 57504 _ = v.Args[2] 57505 val := v.Args[0] 57506 v_1 := v.Args[1] 57507 if v_1.Op != OpAMD64LEAQ { 57508 break 57509 } 57510 off2 := v_1.AuxInt 57511 sym2 := v_1.Aux 57512 base := v_1.Args[0] 57513 mem := v.Args[2] 57514 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 57515 break 57516 } 57517 v.reset(OpAMD64XORQload) 57518 v.AuxInt = off1 + off2 57519 v.Aux = mergeSym(sym1, sym2) 57520 v.AddArg(val) 57521 v.AddArg(base) 57522 v.AddArg(mem) 57523 return true 57524 } 57525 // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 57526 // cond: 57527 // result: (XORQ x (MOVQf2i y)) 57528 for { 57529 off := v.AuxInt 57530 sym := v.Aux 57531 _ = v.Args[2] 57532 x := v.Args[0] 57533 ptr := v.Args[1] 57534 v_2 := v.Args[2] 57535 if v_2.Op != OpAMD64MOVSDstore { 57536 break 57537 } 57538 if v_2.AuxInt != off { 57539 break 57540 } 57541 if v_2.Aux != sym { 57542 break 57543 } 57544 _ = v_2.Args[2] 57545 if ptr != v_2.Args[0] { 57546 break 57547 } 57548 y := v_2.Args[1] 57549 v.reset(OpAMD64XORQ) 57550 v.AddArg(x) 57551 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 57552 v0.AddArg(y) 57553 v.AddArg(v0) 57554 return true 57555 } 57556 return false 57557 } 57558 func rewriteValueAMD64_OpAMD64XORQmodify_0(v *Value) bool { 57559 // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 57560 // cond: is32Bit(off1+off2) 57561 // result: (XORQmodify [off1+off2] {sym} base val mem) 57562 for { 57563 off1 := v.AuxInt 57564 sym := v.Aux 57565 _ = v.Args[2] 57566 v_0 := v.Args[0] 57567 if v_0.Op != OpAMD64ADDQconst { 57568 break 57569 } 57570 off2 := v_0.AuxInt 57571 base := v_0.Args[0] 57572 val := v.Args[1] 57573 mem := v.Args[2] 57574 if !(is32Bit(off1 + off2)) { 57575 break 57576 } 57577 v.reset(OpAMD64XORQmodify) 57578 v.AuxInt = off1 + off2 57579 v.Aux = sym 57580 v.AddArg(base) 57581 v.AddArg(val) 57582 v.AddArg(mem) 57583 return true 57584 } 57585 // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 57586 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 57587 // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 57588 for { 57589 off1 := v.AuxInt 57590 sym1 := v.Aux 57591 _ = v.Args[2] 57592 v_0 := v.Args[0] 57593 if v_0.Op != OpAMD64LEAQ { 57594 break 57595 } 57596 off2 := v_0.AuxInt 57597 sym2 := v_0.Aux 57598 base := v_0.Args[0] 57599 val := v.Args[1] 57600 mem := v.Args[2] 57601 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 57602 break 57603 } 57604 v.reset(OpAMD64XORQmodify) 57605 v.AuxInt = off1 + off2 57606 v.Aux = mergeSym(sym1, sym2) 57607 v.AddArg(base) 57608 v.AddArg(val) 57609 v.AddArg(mem) 57610 return true 57611 } 57612 return false 57613 } 57614 func rewriteValueAMD64_OpAdd16_0(v *Value) bool { 57615 // match: (Add16 x y) 57616 // cond: 57617 // result: (ADDL x y) 57618 for { 57619 _ = v.Args[1] 57620 x := v.Args[0] 57621 y := v.Args[1] 57622 v.reset(OpAMD64ADDL) 57623 v.AddArg(x) 57624 v.AddArg(y) 57625 return true 57626 } 57627 } 57628 func rewriteValueAMD64_OpAdd32_0(v *Value) bool { 57629 // match: (Add32 x y) 57630 // cond: 57631 // result: (ADDL x y) 57632 for { 57633 _ = v.Args[1] 57634 x := v.Args[0] 57635 y := v.Args[1] 57636 v.reset(OpAMD64ADDL) 57637 v.AddArg(x) 57638 v.AddArg(y) 57639 return true 57640 } 57641 } 57642 func rewriteValueAMD64_OpAdd32F_0(v 
*Value) bool { 57643 // match: (Add32F x y) 57644 // cond: 57645 // result: (ADDSS x y) 57646 for { 57647 _ = v.Args[1] 57648 x := v.Args[0] 57649 y := v.Args[1] 57650 v.reset(OpAMD64ADDSS) 57651 v.AddArg(x) 57652 v.AddArg(y) 57653 return true 57654 } 57655 } 57656 func rewriteValueAMD64_OpAdd64_0(v *Value) bool { 57657 // match: (Add64 x y) 57658 // cond: 57659 // result: (ADDQ x y) 57660 for { 57661 _ = v.Args[1] 57662 x := v.Args[0] 57663 y := v.Args[1] 57664 v.reset(OpAMD64ADDQ) 57665 v.AddArg(x) 57666 v.AddArg(y) 57667 return true 57668 } 57669 } 57670 func rewriteValueAMD64_OpAdd64F_0(v *Value) bool { 57671 // match: (Add64F x y) 57672 // cond: 57673 // result: (ADDSD x y) 57674 for { 57675 _ = v.Args[1] 57676 x := v.Args[0] 57677 y := v.Args[1] 57678 v.reset(OpAMD64ADDSD) 57679 v.AddArg(x) 57680 v.AddArg(y) 57681 return true 57682 } 57683 } 57684 func rewriteValueAMD64_OpAdd8_0(v *Value) bool { 57685 // match: (Add8 x y) 57686 // cond: 57687 // result: (ADDL x y) 57688 for { 57689 _ = v.Args[1] 57690 x := v.Args[0] 57691 y := v.Args[1] 57692 v.reset(OpAMD64ADDL) 57693 v.AddArg(x) 57694 v.AddArg(y) 57695 return true 57696 } 57697 } 57698 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool { 57699 b := v.Block 57700 _ = b 57701 config := b.Func.Config 57702 _ = config 57703 // match: (AddPtr x y) 57704 // cond: config.PtrSize == 8 57705 // result: (ADDQ x y) 57706 for { 57707 _ = v.Args[1] 57708 x := v.Args[0] 57709 y := v.Args[1] 57710 if !(config.PtrSize == 8) { 57711 break 57712 } 57713 v.reset(OpAMD64ADDQ) 57714 v.AddArg(x) 57715 v.AddArg(y) 57716 return true 57717 } 57718 // match: (AddPtr x y) 57719 // cond: config.PtrSize == 4 57720 // result: (ADDL x y) 57721 for { 57722 _ = v.Args[1] 57723 x := v.Args[0] 57724 y := v.Args[1] 57725 if !(config.PtrSize == 4) { 57726 break 57727 } 57728 v.reset(OpAMD64ADDL) 57729 v.AddArg(x) 57730 v.AddArg(y) 57731 return true 57732 } 57733 return false 57734 } 57735 func rewriteValueAMD64_OpAddr_0(v *Value) bool { 57736 b := v.Block 57737 _ = b 57738 config := b.Func.Config 57739 _ = config 57740 // match: (Addr {sym} base) 57741 // cond: config.PtrSize == 8 57742 // result: (LEAQ {sym} base) 57743 for { 57744 sym := v.Aux 57745 base := v.Args[0] 57746 if !(config.PtrSize == 8) { 57747 break 57748 } 57749 v.reset(OpAMD64LEAQ) 57750 v.Aux = sym 57751 v.AddArg(base) 57752 return true 57753 } 57754 // match: (Addr {sym} base) 57755 // cond: config.PtrSize == 4 57756 // result: (LEAL {sym} base) 57757 for { 57758 sym := v.Aux 57759 base := v.Args[0] 57760 if !(config.PtrSize == 4) { 57761 break 57762 } 57763 v.reset(OpAMD64LEAL) 57764 v.Aux = sym 57765 v.AddArg(base) 57766 return true 57767 } 57768 return false 57769 } 57770 func rewriteValueAMD64_OpAnd16_0(v *Value) bool { 57771 // match: (And16 x y) 57772 // cond: 57773 // result: (ANDL x y) 57774 for { 57775 _ = v.Args[1] 57776 x := v.Args[0] 57777 y := v.Args[1] 57778 v.reset(OpAMD64ANDL) 57779 v.AddArg(x) 57780 v.AddArg(y) 57781 return true 57782 } 57783 } 57784 func rewriteValueAMD64_OpAnd32_0(v *Value) bool { 57785 // match: (And32 x y) 57786 // cond: 57787 // result: (ANDL x y) 57788 for { 57789 _ = v.Args[1] 57790 x := v.Args[0] 57791 y := v.Args[1] 57792 v.reset(OpAMD64ANDL) 57793 v.AddArg(x) 57794 v.AddArg(y) 57795 return true 57796 } 57797 } 57798 func rewriteValueAMD64_OpAnd64_0(v *Value) bool { 57799 // match: (And64 x y) 57800 // cond: 57801 // result: (ANDQ x y) 57802 for { 57803 _ = v.Args[1] 57804 x := v.Args[0] 57805 y := v.Args[1] 57806 v.reset(OpAMD64ANDQ) 57807 v.AddArg(x) 57808 
v.AddArg(y) 57809 return true 57810 } 57811 } 57812 func rewriteValueAMD64_OpAnd8_0(v *Value) bool { 57813 // match: (And8 x y) 57814 // cond: 57815 // result: (ANDL x y) 57816 for { 57817 _ = v.Args[1] 57818 x := v.Args[0] 57819 y := v.Args[1] 57820 v.reset(OpAMD64ANDL) 57821 v.AddArg(x) 57822 v.AddArg(y) 57823 return true 57824 } 57825 } 57826 func rewriteValueAMD64_OpAndB_0(v *Value) bool { 57827 // match: (AndB x y) 57828 // cond: 57829 // result: (ANDL x y) 57830 for { 57831 _ = v.Args[1] 57832 x := v.Args[0] 57833 y := v.Args[1] 57834 v.reset(OpAMD64ANDL) 57835 v.AddArg(x) 57836 v.AddArg(y) 57837 return true 57838 } 57839 } 57840 func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool { 57841 b := v.Block 57842 _ = b 57843 typ := &b.Func.Config.Types 57844 _ = typ 57845 // match: (AtomicAdd32 ptr val mem) 57846 // cond: 57847 // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) 57848 for { 57849 _ = v.Args[2] 57850 ptr := v.Args[0] 57851 val := v.Args[1] 57852 mem := v.Args[2] 57853 v.reset(OpAMD64AddTupleFirst32) 57854 v.AddArg(val) 57855 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) 57856 v0.AddArg(val) 57857 v0.AddArg(ptr) 57858 v0.AddArg(mem) 57859 v.AddArg(v0) 57860 return true 57861 } 57862 } 57863 func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool { 57864 b := v.Block 57865 _ = b 57866 typ := &b.Func.Config.Types 57867 _ = typ 57868 // match: (AtomicAdd64 ptr val mem) 57869 // cond: 57870 // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) 57871 for { 57872 _ = v.Args[2] 57873 ptr := v.Args[0] 57874 val := v.Args[1] 57875 mem := v.Args[2] 57876 v.reset(OpAMD64AddTupleFirst64) 57877 v.AddArg(val) 57878 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) 57879 v0.AddArg(val) 57880 v0.AddArg(ptr) 57881 v0.AddArg(mem) 57882 v.AddArg(v0) 57883 return true 57884 } 57885 } 57886 func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool { 57887 // match: (AtomicAnd8 ptr val mem) 57888 // cond: 57889 // result: (ANDBlock ptr val mem) 57890 for { 57891 _ = v.Args[2] 57892 ptr := v.Args[0] 57893 val := v.Args[1] 57894 mem := v.Args[2] 57895 v.reset(OpAMD64ANDBlock) 57896 v.AddArg(ptr) 57897 v.AddArg(val) 57898 v.AddArg(mem) 57899 return true 57900 } 57901 } 57902 func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool { 57903 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 57904 // cond: 57905 // result: (CMPXCHGLlock ptr old new_ mem) 57906 for { 57907 _ = v.Args[3] 57908 ptr := v.Args[0] 57909 old := v.Args[1] 57910 new_ := v.Args[2] 57911 mem := v.Args[3] 57912 v.reset(OpAMD64CMPXCHGLlock) 57913 v.AddArg(ptr) 57914 v.AddArg(old) 57915 v.AddArg(new_) 57916 v.AddArg(mem) 57917 return true 57918 } 57919 } 57920 func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool { 57921 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 57922 // cond: 57923 // result: (CMPXCHGQlock ptr old new_ mem) 57924 for { 57925 _ = v.Args[3] 57926 ptr := v.Args[0] 57927 old := v.Args[1] 57928 new_ := v.Args[2] 57929 mem := v.Args[3] 57930 v.reset(OpAMD64CMPXCHGQlock) 57931 v.AddArg(ptr) 57932 v.AddArg(old) 57933 v.AddArg(new_) 57934 v.AddArg(mem) 57935 return true 57936 } 57937 } 57938 func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool { 57939 // match: (AtomicExchange32 ptr val mem) 57940 // cond: 57941 // result: (XCHGL val ptr mem) 57942 for { 57943 _ = v.Args[2] 57944 ptr := v.Args[0] 57945 val := v.Args[1] 57946 mem := v.Args[2] 57947 v.reset(OpAMD64XCHGL) 57948 v.AddArg(val) 57949 v.AddArg(ptr) 
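// NOTE (editorial aside, not part of the generated output): the Op lowerings just
// above collapse widths deliberately. Add8/Add16/And8/And16 become the 32-bit ALU
// ops (ADDL/ANDL) because sub-32-bit values are held in registers whose high bits
// are don't-cares, and the low bits of an add or and depend only on the low bits of
// the inputs. The Atomic* lowerings around this point map onto x86 instructions
// that are atomic either via an explicit LOCK prefix (XADDLlock, XADDQlock,
// CMPXCHGLlock, CMPXCHGQlock, ANDBlock, ORBlock) or implicitly: XCHG with a memory
// operand is locked by the hardware, which is also why the AtomicStore rules below
// go through XCHGL/XCHGQ rather than a plain store. A minimal, self-contained
// sketch of what the AtomicAdd32 rewrite computes; xadd32 here is a hypothetical
// stand-in that models LOCK XADDL non-atomically, just to show the data flow:
//
//	func xadd32(ptr *uint32, val uint32) (old uint32) {
//		old = *ptr  // XADDLlock returns the pre-add value...
//		*ptr += val // ...while atomically adding val into memory
//		return old
//	}
//
//	func atomicAdd32(ptr *uint32, val uint32) uint32 {
//		old := xadd32(ptr, val)
//		return old + val // AddTupleFirst32: callers receive the post-add value
//	}
//
// Returning old+val matches the sync/atomic contract, which yields the new value.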
57950 v.AddArg(mem) 57951 return true 57952 } 57953 } 57954 func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool { 57955 // match: (AtomicExchange64 ptr val mem) 57956 // cond: 57957 // result: (XCHGQ val ptr mem) 57958 for { 57959 _ = v.Args[2] 57960 ptr := v.Args[0] 57961 val := v.Args[1] 57962 mem := v.Args[2] 57963 v.reset(OpAMD64XCHGQ) 57964 v.AddArg(val) 57965 v.AddArg(ptr) 57966 v.AddArg(mem) 57967 return true 57968 } 57969 } 57970 func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool { 57971 // match: (AtomicLoad32 ptr mem) 57972 // cond: 57973 // result: (MOVLatomicload ptr mem) 57974 for { 57975 _ = v.Args[1] 57976 ptr := v.Args[0] 57977 mem := v.Args[1] 57978 v.reset(OpAMD64MOVLatomicload) 57979 v.AddArg(ptr) 57980 v.AddArg(mem) 57981 return true 57982 } 57983 } 57984 func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool { 57985 // match: (AtomicLoad64 ptr mem) 57986 // cond: 57987 // result: (MOVQatomicload ptr mem) 57988 for { 57989 _ = v.Args[1] 57990 ptr := v.Args[0] 57991 mem := v.Args[1] 57992 v.reset(OpAMD64MOVQatomicload) 57993 v.AddArg(ptr) 57994 v.AddArg(mem) 57995 return true 57996 } 57997 } 57998 func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool { 57999 b := v.Block 58000 _ = b 58001 config := b.Func.Config 58002 _ = config 58003 // match: (AtomicLoadPtr ptr mem) 58004 // cond: config.PtrSize == 8 58005 // result: (MOVQatomicload ptr mem) 58006 for { 58007 _ = v.Args[1] 58008 ptr := v.Args[0] 58009 mem := v.Args[1] 58010 if !(config.PtrSize == 8) { 58011 break 58012 } 58013 v.reset(OpAMD64MOVQatomicload) 58014 v.AddArg(ptr) 58015 v.AddArg(mem) 58016 return true 58017 } 58018 // match: (AtomicLoadPtr ptr mem) 58019 // cond: config.PtrSize == 4 58020 // result: (MOVLatomicload ptr mem) 58021 for { 58022 _ = v.Args[1] 58023 ptr := v.Args[0] 58024 mem := v.Args[1] 58025 if !(config.PtrSize == 4) { 58026 break 58027 } 58028 v.reset(OpAMD64MOVLatomicload) 58029 v.AddArg(ptr) 58030 v.AddArg(mem) 58031 return true 58032 } 58033 return false 58034 } 58035 func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool { 58036 // match: (AtomicOr8 ptr val mem) 58037 // cond: 58038 // result: (ORBlock ptr val mem) 58039 for { 58040 _ = v.Args[2] 58041 ptr := v.Args[0] 58042 val := v.Args[1] 58043 mem := v.Args[2] 58044 v.reset(OpAMD64ORBlock) 58045 v.AddArg(ptr) 58046 v.AddArg(val) 58047 v.AddArg(mem) 58048 return true 58049 } 58050 } 58051 func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool { 58052 b := v.Block 58053 _ = b 58054 typ := &b.Func.Config.Types 58055 _ = typ 58056 // match: (AtomicStore32 ptr val mem) 58057 // cond: 58058 // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem)) 58059 for { 58060 _ = v.Args[2] 58061 ptr := v.Args[0] 58062 val := v.Args[1] 58063 mem := v.Args[2] 58064 v.reset(OpSelect1) 58065 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) 58066 v0.AddArg(val) 58067 v0.AddArg(ptr) 58068 v0.AddArg(mem) 58069 v.AddArg(v0) 58070 return true 58071 } 58072 } 58073 func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool { 58074 b := v.Block 58075 _ = b 58076 typ := &b.Func.Config.Types 58077 _ = typ 58078 // match: (AtomicStore64 ptr val mem) 58079 // cond: 58080 // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem)) 58081 for { 58082 _ = v.Args[2] 58083 ptr := v.Args[0] 58084 val := v.Args[1] 58085 mem := v.Args[2] 58086 v.reset(OpSelect1) 58087 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) 58088 v0.AddArg(val) 58089 
v0.AddArg(ptr) 58090 v0.AddArg(mem) 58091 v.AddArg(v0) 58092 return true 58093 } 58094 } 58095 func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool { 58096 b := v.Block 58097 _ = b 58098 config := b.Func.Config 58099 _ = config 58100 typ := &b.Func.Config.Types 58101 _ = typ 58102 // match: (AtomicStorePtrNoWB ptr val mem) 58103 // cond: config.PtrSize == 8 58104 // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) 58105 for { 58106 _ = v.Args[2] 58107 ptr := v.Args[0] 58108 val := v.Args[1] 58109 mem := v.Args[2] 58110 if !(config.PtrSize == 8) { 58111 break 58112 } 58113 v.reset(OpSelect1) 58114 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) 58115 v0.AddArg(val) 58116 v0.AddArg(ptr) 58117 v0.AddArg(mem) 58118 v.AddArg(v0) 58119 return true 58120 } 58121 // match: (AtomicStorePtrNoWB ptr val mem) 58122 // cond: config.PtrSize == 4 58123 // result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) 58124 for { 58125 _ = v.Args[2] 58126 ptr := v.Args[0] 58127 val := v.Args[1] 58128 mem := v.Args[2] 58129 if !(config.PtrSize == 4) { 58130 break 58131 } 58132 v.reset(OpSelect1) 58133 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem)) 58134 v0.AddArg(val) 58135 v0.AddArg(ptr) 58136 v0.AddArg(mem) 58137 v.AddArg(v0) 58138 return true 58139 } 58140 return false 58141 } 58142 func rewriteValueAMD64_OpAvg64u_0(v *Value) bool { 58143 // match: (Avg64u x y) 58144 // cond: 58145 // result: (AVGQU x y) 58146 for { 58147 _ = v.Args[1] 58148 x := v.Args[0] 58149 y := v.Args[1] 58150 v.reset(OpAMD64AVGQU) 58151 v.AddArg(x) 58152 v.AddArg(y) 58153 return true 58154 } 58155 } 58156 func rewriteValueAMD64_OpBitLen16_0(v *Value) bool { 58157 b := v.Block 58158 _ = b 58159 typ := &b.Func.Config.Types 58160 _ = typ 58161 // match: (BitLen16 x) 58162 // cond: 58163 // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x))) 58164 for { 58165 x := v.Args[0] 58166 v.reset(OpAMD64BSRL) 58167 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) 58168 v0.AuxInt = 1 58169 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) 58170 v1.AddArg(x) 58171 v0.AddArg(v1) 58172 v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) 58173 v2.AddArg(x) 58174 v0.AddArg(v2) 58175 v.AddArg(v0) 58176 return true 58177 } 58178 } 58179 func rewriteValueAMD64_OpBitLen32_0(v *Value) bool { 58180 b := v.Block 58181 _ = b 58182 typ := &b.Func.Config.Types 58183 _ = typ 58184 // match: (BitLen32 x) 58185 // cond: 58186 // result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x)))) 58187 for { 58188 x := v.Args[0] 58189 v.reset(OpSelect0) 58190 v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 58191 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) 58192 v1.AuxInt = 1 58193 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) 58194 v2.AddArg(x) 58195 v1.AddArg(v2) 58196 v3 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) 58197 v3.AddArg(x) 58198 v1.AddArg(v3) 58199 v0.AddArg(v1) 58200 v.AddArg(v0) 58201 return true 58202 } 58203 } 58204 func rewriteValueAMD64_OpBitLen64_0(v *Value) bool { 58205 b := v.Block 58206 _ = b 58207 typ := &b.Func.Config.Types 58208 _ = typ 58209 // match: (BitLen64 <t> x) 58210 // cond: 58211 // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x)))) 58212 for { 58213 t := v.Type 58214 x := v.Args[0] 58215 
v.reset(OpAMD64ADDQconst) 58216 v.AuxInt = 1 58217 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) 58218 v1 := b.NewValue0(v.Pos, OpSelect0, t) 58219 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 58220 v2.AddArg(x) 58221 v1.AddArg(v2) 58222 v0.AddArg(v1) 58223 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 58224 v3.AuxInt = -1 58225 v0.AddArg(v3) 58226 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 58227 v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 58228 v5.AddArg(x) 58229 v4.AddArg(v5) 58230 v0.AddArg(v4) 58231 v.AddArg(v0) 58232 return true 58233 } 58234 } 58235 func rewriteValueAMD64_OpBitLen8_0(v *Value) bool { 58236 b := v.Block 58237 _ = b 58238 typ := &b.Func.Config.Types 58239 _ = typ 58240 // match: (BitLen8 x) 58241 // cond: 58242 // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x))) 58243 for { 58244 x := v.Args[0] 58245 v.reset(OpAMD64BSRL) 58246 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) 58247 v0.AuxInt = 1 58248 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) 58249 v1.AddArg(x) 58250 v0.AddArg(v1) 58251 v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) 58252 v2.AddArg(x) 58253 v0.AddArg(v2) 58254 v.AddArg(v0) 58255 return true 58256 } 58257 } 58258 func rewriteValueAMD64_OpBswap32_0(v *Value) bool { 58259 // match: (Bswap32 x) 58260 // cond: 58261 // result: (BSWAPL x) 58262 for { 58263 x := v.Args[0] 58264 v.reset(OpAMD64BSWAPL) 58265 v.AddArg(x) 58266 return true 58267 } 58268 } 58269 func rewriteValueAMD64_OpBswap64_0(v *Value) bool { 58270 // match: (Bswap64 x) 58271 // cond: 58272 // result: (BSWAPQ x) 58273 for { 58274 x := v.Args[0] 58275 v.reset(OpAMD64BSWAPQ) 58276 v.AddArg(x) 58277 return true 58278 } 58279 } 58280 func rewriteValueAMD64_OpCeil_0(v *Value) bool { 58281 // match: (Ceil x) 58282 // cond: 58283 // result: (ROUNDSD [2] x) 58284 for { 58285 x := v.Args[0] 58286 v.reset(OpAMD64ROUNDSD) 58287 v.AuxInt = 2 58288 v.AddArg(x) 58289 return true 58290 } 58291 } 58292 func rewriteValueAMD64_OpClosureCall_0(v *Value) bool { 58293 // match: (ClosureCall [argwid] entry closure mem) 58294 // cond: 58295 // result: (CALLclosure [argwid] entry closure mem) 58296 for { 58297 argwid := v.AuxInt 58298 _ = v.Args[2] 58299 entry := v.Args[0] 58300 closure := v.Args[1] 58301 mem := v.Args[2] 58302 v.reset(OpAMD64CALLclosure) 58303 v.AuxInt = argwid 58304 v.AddArg(entry) 58305 v.AddArg(closure) 58306 v.AddArg(mem) 58307 return true 58308 } 58309 } 58310 func rewriteValueAMD64_OpCom16_0(v *Value) bool { 58311 // match: (Com16 x) 58312 // cond: 58313 // result: (NOTL x) 58314 for { 58315 x := v.Args[0] 58316 v.reset(OpAMD64NOTL) 58317 v.AddArg(x) 58318 return true 58319 } 58320 } 58321 func rewriteValueAMD64_OpCom32_0(v *Value) bool { 58322 // match: (Com32 x) 58323 // cond: 58324 // result: (NOTL x) 58325 for { 58326 x := v.Args[0] 58327 v.reset(OpAMD64NOTL) 58328 v.AddArg(x) 58329 return true 58330 } 58331 } 58332 func rewriteValueAMD64_OpCom64_0(v *Value) bool { 58333 // match: (Com64 x) 58334 // cond: 58335 // result: (NOTQ x) 58336 for { 58337 x := v.Args[0] 58338 v.reset(OpAMD64NOTQ) 58339 v.AddArg(x) 58340 return true 58341 } 58342 } 58343 func rewriteValueAMD64_OpCom8_0(v *Value) bool { 58344 // match: (Com8 x) 58345 // cond: 58346 // result: (NOTL x) 58347 for { 58348 x := v.Args[0] 58349 v.reset(OpAMD64NOTL) 58350 v.AddArg(x) 58351 return true 58352 } 58353 } 58354 func rewriteValueAMD64_OpCondSelect_0(v *Value) bool { 58355 // match: 
(CondSelect <t> x y (SETEQ cond)) 58356 // cond: (is64BitInt(t) || isPtr(t)) 58357 // result: (CMOVQEQ y x cond) 58358 for { 58359 t := v.Type 58360 _ = v.Args[2] 58361 x := v.Args[0] 58362 y := v.Args[1] 58363 v_2 := v.Args[2] 58364 if v_2.Op != OpAMD64SETEQ { 58365 break 58366 } 58367 cond := v_2.Args[0] 58368 if !(is64BitInt(t) || isPtr(t)) { 58369 break 58370 } 58371 v.reset(OpAMD64CMOVQEQ) 58372 v.AddArg(y) 58373 v.AddArg(x) 58374 v.AddArg(cond) 58375 return true 58376 } 58377 // match: (CondSelect <t> x y (SETNE cond)) 58378 // cond: (is64BitInt(t) || isPtr(t)) 58379 // result: (CMOVQNE y x cond) 58380 for { 58381 t := v.Type 58382 _ = v.Args[2] 58383 x := v.Args[0] 58384 y := v.Args[1] 58385 v_2 := v.Args[2] 58386 if v_2.Op != OpAMD64SETNE { 58387 break 58388 } 58389 cond := v_2.Args[0] 58390 if !(is64BitInt(t) || isPtr(t)) { 58391 break 58392 } 58393 v.reset(OpAMD64CMOVQNE) 58394 v.AddArg(y) 58395 v.AddArg(x) 58396 v.AddArg(cond) 58397 return true 58398 } 58399 // match: (CondSelect <t> x y (SETL cond)) 58400 // cond: (is64BitInt(t) || isPtr(t)) 58401 // result: (CMOVQLT y x cond) 58402 for { 58403 t := v.Type 58404 _ = v.Args[2] 58405 x := v.Args[0] 58406 y := v.Args[1] 58407 v_2 := v.Args[2] 58408 if v_2.Op != OpAMD64SETL { 58409 break 58410 } 58411 cond := v_2.Args[0] 58412 if !(is64BitInt(t) || isPtr(t)) { 58413 break 58414 } 58415 v.reset(OpAMD64CMOVQLT) 58416 v.AddArg(y) 58417 v.AddArg(x) 58418 v.AddArg(cond) 58419 return true 58420 } 58421 // match: (CondSelect <t> x y (SETG cond)) 58422 // cond: (is64BitInt(t) || isPtr(t)) 58423 // result: (CMOVQGT y x cond) 58424 for { 58425 t := v.Type 58426 _ = v.Args[2] 58427 x := v.Args[0] 58428 y := v.Args[1] 58429 v_2 := v.Args[2] 58430 if v_2.Op != OpAMD64SETG { 58431 break 58432 } 58433 cond := v_2.Args[0] 58434 if !(is64BitInt(t) || isPtr(t)) { 58435 break 58436 } 58437 v.reset(OpAMD64CMOVQGT) 58438 v.AddArg(y) 58439 v.AddArg(x) 58440 v.AddArg(cond) 58441 return true 58442 } 58443 // match: (CondSelect <t> x y (SETLE cond)) 58444 // cond: (is64BitInt(t) || isPtr(t)) 58445 // result: (CMOVQLE y x cond) 58446 for { 58447 t := v.Type 58448 _ = v.Args[2] 58449 x := v.Args[0] 58450 y := v.Args[1] 58451 v_2 := v.Args[2] 58452 if v_2.Op != OpAMD64SETLE { 58453 break 58454 } 58455 cond := v_2.Args[0] 58456 if !(is64BitInt(t) || isPtr(t)) { 58457 break 58458 } 58459 v.reset(OpAMD64CMOVQLE) 58460 v.AddArg(y) 58461 v.AddArg(x) 58462 v.AddArg(cond) 58463 return true 58464 } 58465 // match: (CondSelect <t> x y (SETGE cond)) 58466 // cond: (is64BitInt(t) || isPtr(t)) 58467 // result: (CMOVQGE y x cond) 58468 for { 58469 t := v.Type 58470 _ = v.Args[2] 58471 x := v.Args[0] 58472 y := v.Args[1] 58473 v_2 := v.Args[2] 58474 if v_2.Op != OpAMD64SETGE { 58475 break 58476 } 58477 cond := v_2.Args[0] 58478 if !(is64BitInt(t) || isPtr(t)) { 58479 break 58480 } 58481 v.reset(OpAMD64CMOVQGE) 58482 v.AddArg(y) 58483 v.AddArg(x) 58484 v.AddArg(cond) 58485 return true 58486 } 58487 // match: (CondSelect <t> x y (SETA cond)) 58488 // cond: (is64BitInt(t) || isPtr(t)) 58489 // result: (CMOVQHI y x cond) 58490 for { 58491 t := v.Type 58492 _ = v.Args[2] 58493 x := v.Args[0] 58494 y := v.Args[1] 58495 v_2 := v.Args[2] 58496 if v_2.Op != OpAMD64SETA { 58497 break 58498 } 58499 cond := v_2.Args[0] 58500 if !(is64BitInt(t) || isPtr(t)) { 58501 break 58502 } 58503 v.reset(OpAMD64CMOVQHI) 58504 v.AddArg(y) 58505 v.AddArg(x) 58506 v.AddArg(cond) 58507 return true 58508 } 58509 // match: (CondSelect <t> x y (SETB cond)) 58510 // cond: (is64BitInt(t) || isPtr(t)) 58511 
// result: (CMOVQCS y x cond) 58512 for { 58513 t := v.Type 58514 _ = v.Args[2] 58515 x := v.Args[0] 58516 y := v.Args[1] 58517 v_2 := v.Args[2] 58518 if v_2.Op != OpAMD64SETB { 58519 break 58520 } 58521 cond := v_2.Args[0] 58522 if !(is64BitInt(t) || isPtr(t)) { 58523 break 58524 } 58525 v.reset(OpAMD64CMOVQCS) 58526 v.AddArg(y) 58527 v.AddArg(x) 58528 v.AddArg(cond) 58529 return true 58530 } 58531 // match: (CondSelect <t> x y (SETAE cond)) 58532 // cond: (is64BitInt(t) || isPtr(t)) 58533 // result: (CMOVQCC y x cond) 58534 for { 58535 t := v.Type 58536 _ = v.Args[2] 58537 x := v.Args[0] 58538 y := v.Args[1] 58539 v_2 := v.Args[2] 58540 if v_2.Op != OpAMD64SETAE { 58541 break 58542 } 58543 cond := v_2.Args[0] 58544 if !(is64BitInt(t) || isPtr(t)) { 58545 break 58546 } 58547 v.reset(OpAMD64CMOVQCC) 58548 v.AddArg(y) 58549 v.AddArg(x) 58550 v.AddArg(cond) 58551 return true 58552 } 58553 // match: (CondSelect <t> x y (SETBE cond)) 58554 // cond: (is64BitInt(t) || isPtr(t)) 58555 // result: (CMOVQLS y x cond) 58556 for { 58557 t := v.Type 58558 _ = v.Args[2] 58559 x := v.Args[0] 58560 y := v.Args[1] 58561 v_2 := v.Args[2] 58562 if v_2.Op != OpAMD64SETBE { 58563 break 58564 } 58565 cond := v_2.Args[0] 58566 if !(is64BitInt(t) || isPtr(t)) { 58567 break 58568 } 58569 v.reset(OpAMD64CMOVQLS) 58570 v.AddArg(y) 58571 v.AddArg(x) 58572 v.AddArg(cond) 58573 return true 58574 } 58575 return false 58576 } 58577 func rewriteValueAMD64_OpCondSelect_10(v *Value) bool { 58578 // match: (CondSelect <t> x y (SETEQF cond)) 58579 // cond: (is64BitInt(t) || isPtr(t)) 58580 // result: (CMOVQEQF y x cond) 58581 for { 58582 t := v.Type 58583 _ = v.Args[2] 58584 x := v.Args[0] 58585 y := v.Args[1] 58586 v_2 := v.Args[2] 58587 if v_2.Op != OpAMD64SETEQF { 58588 break 58589 } 58590 cond := v_2.Args[0] 58591 if !(is64BitInt(t) || isPtr(t)) { 58592 break 58593 } 58594 v.reset(OpAMD64CMOVQEQF) 58595 v.AddArg(y) 58596 v.AddArg(x) 58597 v.AddArg(cond) 58598 return true 58599 } 58600 // match: (CondSelect <t> x y (SETNEF cond)) 58601 // cond: (is64BitInt(t) || isPtr(t)) 58602 // result: (CMOVQNEF y x cond) 58603 for { 58604 t := v.Type 58605 _ = v.Args[2] 58606 x := v.Args[0] 58607 y := v.Args[1] 58608 v_2 := v.Args[2] 58609 if v_2.Op != OpAMD64SETNEF { 58610 break 58611 } 58612 cond := v_2.Args[0] 58613 if !(is64BitInt(t) || isPtr(t)) { 58614 break 58615 } 58616 v.reset(OpAMD64CMOVQNEF) 58617 v.AddArg(y) 58618 v.AddArg(x) 58619 v.AddArg(cond) 58620 return true 58621 } 58622 // match: (CondSelect <t> x y (SETGF cond)) 58623 // cond: (is64BitInt(t) || isPtr(t)) 58624 // result: (CMOVQGTF y x cond) 58625 for { 58626 t := v.Type 58627 _ = v.Args[2] 58628 x := v.Args[0] 58629 y := v.Args[1] 58630 v_2 := v.Args[2] 58631 if v_2.Op != OpAMD64SETGF { 58632 break 58633 } 58634 cond := v_2.Args[0] 58635 if !(is64BitInt(t) || isPtr(t)) { 58636 break 58637 } 58638 v.reset(OpAMD64CMOVQGTF) 58639 v.AddArg(y) 58640 v.AddArg(x) 58641 v.AddArg(cond) 58642 return true 58643 } 58644 // match: (CondSelect <t> x y (SETGEF cond)) 58645 // cond: (is64BitInt(t) || isPtr(t)) 58646 // result: (CMOVQGEF y x cond) 58647 for { 58648 t := v.Type 58649 _ = v.Args[2] 58650 x := v.Args[0] 58651 y := v.Args[1] 58652 v_2 := v.Args[2] 58653 if v_2.Op != OpAMD64SETGEF { 58654 break 58655 } 58656 cond := v_2.Args[0] 58657 if !(is64BitInt(t) || isPtr(t)) { 58658 break 58659 } 58660 v.reset(OpAMD64CMOVQGEF) 58661 v.AddArg(y) 58662 v.AddArg(x) 58663 v.AddArg(cond) 58664 return true 58665 } 58666 // match: (CondSelect <t> x y (SETEQ cond)) 58667 // cond: 
is32BitInt(t) 58668 // result: (CMOVLEQ y x cond) 58669 for { 58670 t := v.Type 58671 _ = v.Args[2] 58672 x := v.Args[0] 58673 y := v.Args[1] 58674 v_2 := v.Args[2] 58675 if v_2.Op != OpAMD64SETEQ { 58676 break 58677 } 58678 cond := v_2.Args[0] 58679 if !(is32BitInt(t)) { 58680 break 58681 } 58682 v.reset(OpAMD64CMOVLEQ) 58683 v.AddArg(y) 58684 v.AddArg(x) 58685 v.AddArg(cond) 58686 return true 58687 } 58688 // match: (CondSelect <t> x y (SETNE cond)) 58689 // cond: is32BitInt(t) 58690 // result: (CMOVLNE y x cond) 58691 for { 58692 t := v.Type 58693 _ = v.Args[2] 58694 x := v.Args[0] 58695 y := v.Args[1] 58696 v_2 := v.Args[2] 58697 if v_2.Op != OpAMD64SETNE { 58698 break 58699 } 58700 cond := v_2.Args[0] 58701 if !(is32BitInt(t)) { 58702 break 58703 } 58704 v.reset(OpAMD64CMOVLNE) 58705 v.AddArg(y) 58706 v.AddArg(x) 58707 v.AddArg(cond) 58708 return true 58709 } 58710 // match: (CondSelect <t> x y (SETL cond)) 58711 // cond: is32BitInt(t) 58712 // result: (CMOVLLT y x cond) 58713 for { 58714 t := v.Type 58715 _ = v.Args[2] 58716 x := v.Args[0] 58717 y := v.Args[1] 58718 v_2 := v.Args[2] 58719 if v_2.Op != OpAMD64SETL { 58720 break 58721 } 58722 cond := v_2.Args[0] 58723 if !(is32BitInt(t)) { 58724 break 58725 } 58726 v.reset(OpAMD64CMOVLLT) 58727 v.AddArg(y) 58728 v.AddArg(x) 58729 v.AddArg(cond) 58730 return true 58731 } 58732 // match: (CondSelect <t> x y (SETG cond)) 58733 // cond: is32BitInt(t) 58734 // result: (CMOVLGT y x cond) 58735 for { 58736 t := v.Type 58737 _ = v.Args[2] 58738 x := v.Args[0] 58739 y := v.Args[1] 58740 v_2 := v.Args[2] 58741 if v_2.Op != OpAMD64SETG { 58742 break 58743 } 58744 cond := v_2.Args[0] 58745 if !(is32BitInt(t)) { 58746 break 58747 } 58748 v.reset(OpAMD64CMOVLGT) 58749 v.AddArg(y) 58750 v.AddArg(x) 58751 v.AddArg(cond) 58752 return true 58753 } 58754 // match: (CondSelect <t> x y (SETLE cond)) 58755 // cond: is32BitInt(t) 58756 // result: (CMOVLLE y x cond) 58757 for { 58758 t := v.Type 58759 _ = v.Args[2] 58760 x := v.Args[0] 58761 y := v.Args[1] 58762 v_2 := v.Args[2] 58763 if v_2.Op != OpAMD64SETLE { 58764 break 58765 } 58766 cond := v_2.Args[0] 58767 if !(is32BitInt(t)) { 58768 break 58769 } 58770 v.reset(OpAMD64CMOVLLE) 58771 v.AddArg(y) 58772 v.AddArg(x) 58773 v.AddArg(cond) 58774 return true 58775 } 58776 // match: (CondSelect <t> x y (SETGE cond)) 58777 // cond: is32BitInt(t) 58778 // result: (CMOVLGE y x cond) 58779 for { 58780 t := v.Type 58781 _ = v.Args[2] 58782 x := v.Args[0] 58783 y := v.Args[1] 58784 v_2 := v.Args[2] 58785 if v_2.Op != OpAMD64SETGE { 58786 break 58787 } 58788 cond := v_2.Args[0] 58789 if !(is32BitInt(t)) { 58790 break 58791 } 58792 v.reset(OpAMD64CMOVLGE) 58793 v.AddArg(y) 58794 v.AddArg(x) 58795 v.AddArg(cond) 58796 return true 58797 } 58798 return false 58799 } 58800 func rewriteValueAMD64_OpCondSelect_20(v *Value) bool { 58801 // match: (CondSelect <t> x y (SETA cond)) 58802 // cond: is32BitInt(t) 58803 // result: (CMOVLHI y x cond) 58804 for { 58805 t := v.Type 58806 _ = v.Args[2] 58807 x := v.Args[0] 58808 y := v.Args[1] 58809 v_2 := v.Args[2] 58810 if v_2.Op != OpAMD64SETA { 58811 break 58812 } 58813 cond := v_2.Args[0] 58814 if !(is32BitInt(t)) { 58815 break 58816 } 58817 v.reset(OpAMD64CMOVLHI) 58818 v.AddArg(y) 58819 v.AddArg(x) 58820 v.AddArg(cond) 58821 return true 58822 } 58823 // match: (CondSelect <t> x y (SETB cond)) 58824 // cond: is32BitInt(t) 58825 // result: (CMOVLCS y x cond) 58826 for { 58827 t := v.Type 58828 _ = v.Args[2] 58829 x := v.Args[0] 58830 y := v.Args[1] 58831 v_2 := v.Args[2] 58832 if 
v_2.Op != OpAMD64SETB { 58833 break 58834 } 58835 cond := v_2.Args[0] 58836 if !(is32BitInt(t)) { 58837 break 58838 } 58839 v.reset(OpAMD64CMOVLCS) 58840 v.AddArg(y) 58841 v.AddArg(x) 58842 v.AddArg(cond) 58843 return true 58844 } 58845 // match: (CondSelect <t> x y (SETAE cond)) 58846 // cond: is32BitInt(t) 58847 // result: (CMOVLCC y x cond) 58848 for { 58849 t := v.Type 58850 _ = v.Args[2] 58851 x := v.Args[0] 58852 y := v.Args[1] 58853 v_2 := v.Args[2] 58854 if v_2.Op != OpAMD64SETAE { 58855 break 58856 } 58857 cond := v_2.Args[0] 58858 if !(is32BitInt(t)) { 58859 break 58860 } 58861 v.reset(OpAMD64CMOVLCC) 58862 v.AddArg(y) 58863 v.AddArg(x) 58864 v.AddArg(cond) 58865 return true 58866 } 58867 // match: (CondSelect <t> x y (SETBE cond)) 58868 // cond: is32BitInt(t) 58869 // result: (CMOVLLS y x cond) 58870 for { 58871 t := v.Type 58872 _ = v.Args[2] 58873 x := v.Args[0] 58874 y := v.Args[1] 58875 v_2 := v.Args[2] 58876 if v_2.Op != OpAMD64SETBE { 58877 break 58878 } 58879 cond := v_2.Args[0] 58880 if !(is32BitInt(t)) { 58881 break 58882 } 58883 v.reset(OpAMD64CMOVLLS) 58884 v.AddArg(y) 58885 v.AddArg(x) 58886 v.AddArg(cond) 58887 return true 58888 } 58889 // match: (CondSelect <t> x y (SETEQF cond)) 58890 // cond: is32BitInt(t) 58891 // result: (CMOVLEQF y x cond) 58892 for { 58893 t := v.Type 58894 _ = v.Args[2] 58895 x := v.Args[0] 58896 y := v.Args[1] 58897 v_2 := v.Args[2] 58898 if v_2.Op != OpAMD64SETEQF { 58899 break 58900 } 58901 cond := v_2.Args[0] 58902 if !(is32BitInt(t)) { 58903 break 58904 } 58905 v.reset(OpAMD64CMOVLEQF) 58906 v.AddArg(y) 58907 v.AddArg(x) 58908 v.AddArg(cond) 58909 return true 58910 } 58911 // match: (CondSelect <t> x y (SETNEF cond)) 58912 // cond: is32BitInt(t) 58913 // result: (CMOVLNEF y x cond) 58914 for { 58915 t := v.Type 58916 _ = v.Args[2] 58917 x := v.Args[0] 58918 y := v.Args[1] 58919 v_2 := v.Args[2] 58920 if v_2.Op != OpAMD64SETNEF { 58921 break 58922 } 58923 cond := v_2.Args[0] 58924 if !(is32BitInt(t)) { 58925 break 58926 } 58927 v.reset(OpAMD64CMOVLNEF) 58928 v.AddArg(y) 58929 v.AddArg(x) 58930 v.AddArg(cond) 58931 return true 58932 } 58933 // match: (CondSelect <t> x y (SETGF cond)) 58934 // cond: is32BitInt(t) 58935 // result: (CMOVLGTF y x cond) 58936 for { 58937 t := v.Type 58938 _ = v.Args[2] 58939 x := v.Args[0] 58940 y := v.Args[1] 58941 v_2 := v.Args[2] 58942 if v_2.Op != OpAMD64SETGF { 58943 break 58944 } 58945 cond := v_2.Args[0] 58946 if !(is32BitInt(t)) { 58947 break 58948 } 58949 v.reset(OpAMD64CMOVLGTF) 58950 v.AddArg(y) 58951 v.AddArg(x) 58952 v.AddArg(cond) 58953 return true 58954 } 58955 // match: (CondSelect <t> x y (SETGEF cond)) 58956 // cond: is32BitInt(t) 58957 // result: (CMOVLGEF y x cond) 58958 for { 58959 t := v.Type 58960 _ = v.Args[2] 58961 x := v.Args[0] 58962 y := v.Args[1] 58963 v_2 := v.Args[2] 58964 if v_2.Op != OpAMD64SETGEF { 58965 break 58966 } 58967 cond := v_2.Args[0] 58968 if !(is32BitInt(t)) { 58969 break 58970 } 58971 v.reset(OpAMD64CMOVLGEF) 58972 v.AddArg(y) 58973 v.AddArg(x) 58974 v.AddArg(cond) 58975 return true 58976 } 58977 // match: (CondSelect <t> x y (SETEQ cond)) 58978 // cond: is16BitInt(t) 58979 // result: (CMOVWEQ y x cond) 58980 for { 58981 t := v.Type 58982 _ = v.Args[2] 58983 x := v.Args[0] 58984 y := v.Args[1] 58985 v_2 := v.Args[2] 58986 if v_2.Op != OpAMD64SETEQ { 58987 break 58988 } 58989 cond := v_2.Args[0] 58990 if !(is16BitInt(t)) { 58991 break 58992 } 58993 v.reset(OpAMD64CMOVWEQ) 58994 v.AddArg(y) 58995 v.AddArg(x) 58996 v.AddArg(cond) 58997 return true 58998 } 58999 // 
match: (CondSelect <t> x y (SETNE cond)) 59000 // cond: is16BitInt(t) 59001 // result: (CMOVWNE y x cond) 59002 for { 59003 t := v.Type 59004 _ = v.Args[2] 59005 x := v.Args[0] 59006 y := v.Args[1] 59007 v_2 := v.Args[2] 59008 if v_2.Op != OpAMD64SETNE { 59009 break 59010 } 59011 cond := v_2.Args[0] 59012 if !(is16BitInt(t)) { 59013 break 59014 } 59015 v.reset(OpAMD64CMOVWNE) 59016 v.AddArg(y) 59017 v.AddArg(x) 59018 v.AddArg(cond) 59019 return true 59020 } 59021 return false 59022 } 59023 func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { 59024 // match: (CondSelect <t> x y (SETL cond)) 59025 // cond: is16BitInt(t) 59026 // result: (CMOVWLT y x cond) 59027 for { 59028 t := v.Type 59029 _ = v.Args[2] 59030 x := v.Args[0] 59031 y := v.Args[1] 59032 v_2 := v.Args[2] 59033 if v_2.Op != OpAMD64SETL { 59034 break 59035 } 59036 cond := v_2.Args[0] 59037 if !(is16BitInt(t)) { 59038 break 59039 } 59040 v.reset(OpAMD64CMOVWLT) 59041 v.AddArg(y) 59042 v.AddArg(x) 59043 v.AddArg(cond) 59044 return true 59045 } 59046 // match: (CondSelect <t> x y (SETG cond)) 59047 // cond: is16BitInt(t) 59048 // result: (CMOVWGT y x cond) 59049 for { 59050 t := v.Type 59051 _ = v.Args[2] 59052 x := v.Args[0] 59053 y := v.Args[1] 59054 v_2 := v.Args[2] 59055 if v_2.Op != OpAMD64SETG { 59056 break 59057 } 59058 cond := v_2.Args[0] 59059 if !(is16BitInt(t)) { 59060 break 59061 } 59062 v.reset(OpAMD64CMOVWGT) 59063 v.AddArg(y) 59064 v.AddArg(x) 59065 v.AddArg(cond) 59066 return true 59067 } 59068 // match: (CondSelect <t> x y (SETLE cond)) 59069 // cond: is16BitInt(t) 59070 // result: (CMOVWLE y x cond) 59071 for { 59072 t := v.Type 59073 _ = v.Args[2] 59074 x := v.Args[0] 59075 y := v.Args[1] 59076 v_2 := v.Args[2] 59077 if v_2.Op != OpAMD64SETLE { 59078 break 59079 } 59080 cond := v_2.Args[0] 59081 if !(is16BitInt(t)) { 59082 break 59083 } 59084 v.reset(OpAMD64CMOVWLE) 59085 v.AddArg(y) 59086 v.AddArg(x) 59087 v.AddArg(cond) 59088 return true 59089 } 59090 // match: (CondSelect <t> x y (SETGE cond)) 59091 // cond: is16BitInt(t) 59092 // result: (CMOVWGE y x cond) 59093 for { 59094 t := v.Type 59095 _ = v.Args[2] 59096 x := v.Args[0] 59097 y := v.Args[1] 59098 v_2 := v.Args[2] 59099 if v_2.Op != OpAMD64SETGE { 59100 break 59101 } 59102 cond := v_2.Args[0] 59103 if !(is16BitInt(t)) { 59104 break 59105 } 59106 v.reset(OpAMD64CMOVWGE) 59107 v.AddArg(y) 59108 v.AddArg(x) 59109 v.AddArg(cond) 59110 return true 59111 } 59112 // match: (CondSelect <t> x y (SETA cond)) 59113 // cond: is16BitInt(t) 59114 // result: (CMOVWHI y x cond) 59115 for { 59116 t := v.Type 59117 _ = v.Args[2] 59118 x := v.Args[0] 59119 y := v.Args[1] 59120 v_2 := v.Args[2] 59121 if v_2.Op != OpAMD64SETA { 59122 break 59123 } 59124 cond := v_2.Args[0] 59125 if !(is16BitInt(t)) { 59126 break 59127 } 59128 v.reset(OpAMD64CMOVWHI) 59129 v.AddArg(y) 59130 v.AddArg(x) 59131 v.AddArg(cond) 59132 return true 59133 } 59134 // match: (CondSelect <t> x y (SETB cond)) 59135 // cond: is16BitInt(t) 59136 // result: (CMOVWCS y x cond) 59137 for { 59138 t := v.Type 59139 _ = v.Args[2] 59140 x := v.Args[0] 59141 y := v.Args[1] 59142 v_2 := v.Args[2] 59143 if v_2.Op != OpAMD64SETB { 59144 break 59145 } 59146 cond := v_2.Args[0] 59147 if !(is16BitInt(t)) { 59148 break 59149 } 59150 v.reset(OpAMD64CMOVWCS) 59151 v.AddArg(y) 59152 v.AddArg(x) 59153 v.AddArg(cond) 59154 return true 59155 } 59156 // match: (CondSelect <t> x y (SETAE cond)) 59157 // cond: is16BitInt(t) 59158 // result: (CMOVWCC y x cond) 59159 for { 59160 t := v.Type 59161 _ = v.Args[2] 59162 x := v.Args[0] 
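// NOTE (editorial aside, not part of the generated output): the CondSelect rules
// are split across rewriteValueAMD64_OpCondSelect_0 through _40 in chunks of at
// most ten, each returning false when no rule fires so the next chunk can be
// tried. The operand order (y, x, cond) in every result is deliberate: the CMOV
// ops produce their result in their first argument, so the result starts out as y
// (the "false" value) and x is conditionally moved over it when the SETcc
// condition holds; that matches CondSelect's convention of returning x when the
// condition is true, as the CMPQconst [0] fallbacks below also show, where a
// nonzero check selects x via CMOVQNE. Width is dispatched on the result type:
// CMOVQ* for 64-bit and pointer types, CMOVL* for 32-bit, CMOVW* for 16-bit
// (x86 has no 8-bit CMOV). In ordinary Go, the SETEQ/64-bit case computes
// (sketch):
//
//	func condSelectEQ(x, y int64, eq bool) int64 {
//		r := y // CMOVQEQ's result starts as its first operand
//		if eq {
//			r = x // conditionally replaced when ZF is set
//		}
//		return r
//	}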
59163 y := v.Args[1] 59164 v_2 := v.Args[2] 59165 if v_2.Op != OpAMD64SETAE { 59166 break 59167 } 59168 cond := v_2.Args[0] 59169 if !(is16BitInt(t)) { 59170 break 59171 } 59172 v.reset(OpAMD64CMOVWCC) 59173 v.AddArg(y) 59174 v.AddArg(x) 59175 v.AddArg(cond) 59176 return true 59177 } 59178 // match: (CondSelect <t> x y (SETBE cond)) 59179 // cond: is16BitInt(t) 59180 // result: (CMOVWLS y x cond) 59181 for { 59182 t := v.Type 59183 _ = v.Args[2] 59184 x := v.Args[0] 59185 y := v.Args[1] 59186 v_2 := v.Args[2] 59187 if v_2.Op != OpAMD64SETBE { 59188 break 59189 } 59190 cond := v_2.Args[0] 59191 if !(is16BitInt(t)) { 59192 break 59193 } 59194 v.reset(OpAMD64CMOVWLS) 59195 v.AddArg(y) 59196 v.AddArg(x) 59197 v.AddArg(cond) 59198 return true 59199 } 59200 // match: (CondSelect <t> x y (SETEQF cond)) 59201 // cond: is16BitInt(t) 59202 // result: (CMOVWEQF y x cond) 59203 for { 59204 t := v.Type 59205 _ = v.Args[2] 59206 x := v.Args[0] 59207 y := v.Args[1] 59208 v_2 := v.Args[2] 59209 if v_2.Op != OpAMD64SETEQF { 59210 break 59211 } 59212 cond := v_2.Args[0] 59213 if !(is16BitInt(t)) { 59214 break 59215 } 59216 v.reset(OpAMD64CMOVWEQF) 59217 v.AddArg(y) 59218 v.AddArg(x) 59219 v.AddArg(cond) 59220 return true 59221 } 59222 // match: (CondSelect <t> x y (SETNEF cond)) 59223 // cond: is16BitInt(t) 59224 // result: (CMOVWNEF y x cond) 59225 for { 59226 t := v.Type 59227 _ = v.Args[2] 59228 x := v.Args[0] 59229 y := v.Args[1] 59230 v_2 := v.Args[2] 59231 if v_2.Op != OpAMD64SETNEF { 59232 break 59233 } 59234 cond := v_2.Args[0] 59235 if !(is16BitInt(t)) { 59236 break 59237 } 59238 v.reset(OpAMD64CMOVWNEF) 59239 v.AddArg(y) 59240 v.AddArg(x) 59241 v.AddArg(cond) 59242 return true 59243 } 59244 return false 59245 } 59246 func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { 59247 b := v.Block 59248 _ = b 59249 typ := &b.Func.Config.Types 59250 _ = typ 59251 // match: (CondSelect <t> x y (SETGF cond)) 59252 // cond: is16BitInt(t) 59253 // result: (CMOVWGTF y x cond) 59254 for { 59255 t := v.Type 59256 _ = v.Args[2] 59257 x := v.Args[0] 59258 y := v.Args[1] 59259 v_2 := v.Args[2] 59260 if v_2.Op != OpAMD64SETGF { 59261 break 59262 } 59263 cond := v_2.Args[0] 59264 if !(is16BitInt(t)) { 59265 break 59266 } 59267 v.reset(OpAMD64CMOVWGTF) 59268 v.AddArg(y) 59269 v.AddArg(x) 59270 v.AddArg(cond) 59271 return true 59272 } 59273 // match: (CondSelect <t> x y (SETGEF cond)) 59274 // cond: is16BitInt(t) 59275 // result: (CMOVWGEF y x cond) 59276 for { 59277 t := v.Type 59278 _ = v.Args[2] 59279 x := v.Args[0] 59280 y := v.Args[1] 59281 v_2 := v.Args[2] 59282 if v_2.Op != OpAMD64SETGEF { 59283 break 59284 } 59285 cond := v_2.Args[0] 59286 if !(is16BitInt(t)) { 59287 break 59288 } 59289 v.reset(OpAMD64CMOVWGEF) 59290 v.AddArg(y) 59291 v.AddArg(x) 59292 v.AddArg(cond) 59293 return true 59294 } 59295 // match: (CondSelect <t> x y check) 59296 // cond: !check.Type.IsFlags() && check.Type.Size() == 1 59297 // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check)) 59298 for { 59299 t := v.Type 59300 _ = v.Args[2] 59301 x := v.Args[0] 59302 y := v.Args[1] 59303 check := v.Args[2] 59304 if !(!check.Type.IsFlags() && check.Type.Size() == 1) { 59305 break 59306 } 59307 v.reset(OpCondSelect) 59308 v.Type = t 59309 v.AddArg(x) 59310 v.AddArg(y) 59311 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) 59312 v0.AddArg(check) 59313 v.AddArg(v0) 59314 return true 59315 } 59316 // match: (CondSelect <t> x y check) 59317 // cond: !check.Type.IsFlags() && check.Type.Size() == 2 59318 // result: (CondSelect <t> x y (MOVWQZX 
<typ.UInt64> check)) 59319 for { 59320 t := v.Type 59321 _ = v.Args[2] 59322 x := v.Args[0] 59323 y := v.Args[1] 59324 check := v.Args[2] 59325 if !(!check.Type.IsFlags() && check.Type.Size() == 2) { 59326 break 59327 } 59328 v.reset(OpCondSelect) 59329 v.Type = t 59330 v.AddArg(x) 59331 v.AddArg(y) 59332 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) 59333 v0.AddArg(check) 59334 v.AddArg(v0) 59335 return true 59336 } 59337 // match: (CondSelect <t> x y check) 59338 // cond: !check.Type.IsFlags() && check.Type.Size() == 4 59339 // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check)) 59340 for { 59341 t := v.Type 59342 _ = v.Args[2] 59343 x := v.Args[0] 59344 y := v.Args[1] 59345 check := v.Args[2] 59346 if !(!check.Type.IsFlags() && check.Type.Size() == 4) { 59347 break 59348 } 59349 v.reset(OpCondSelect) 59350 v.Type = t 59351 v.AddArg(x) 59352 v.AddArg(y) 59353 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) 59354 v0.AddArg(check) 59355 v.AddArg(v0) 59356 return true 59357 } 59358 // match: (CondSelect <t> x y check) 59359 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) 59360 // result: (CMOVQNE y x (CMPQconst [0] check)) 59361 for { 59362 t := v.Type 59363 _ = v.Args[2] 59364 x := v.Args[0] 59365 y := v.Args[1] 59366 check := v.Args[2] 59367 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { 59368 break 59369 } 59370 v.reset(OpAMD64CMOVQNE) 59371 v.AddArg(y) 59372 v.AddArg(x) 59373 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 59374 v0.AuxInt = 0 59375 v0.AddArg(check) 59376 v.AddArg(v0) 59377 return true 59378 } 59379 // match: (CondSelect <t> x y check) 59380 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) 59381 // result: (CMOVLNE y x (CMPQconst [0] check)) 59382 for { 59383 t := v.Type 59384 _ = v.Args[2] 59385 x := v.Args[0] 59386 y := v.Args[1] 59387 check := v.Args[2] 59388 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { 59389 break 59390 } 59391 v.reset(OpAMD64CMOVLNE) 59392 v.AddArg(y) 59393 v.AddArg(x) 59394 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 59395 v0.AuxInt = 0 59396 v0.AddArg(check) 59397 v.AddArg(v0) 59398 return true 59399 } 59400 // match: (CondSelect <t> x y check) 59401 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) 59402 // result: (CMOVWNE y x (CMPQconst [0] check)) 59403 for { 59404 t := v.Type 59405 _ = v.Args[2] 59406 x := v.Args[0] 59407 y := v.Args[1] 59408 check := v.Args[2] 59409 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { 59410 break 59411 } 59412 v.reset(OpAMD64CMOVWNE) 59413 v.AddArg(y) 59414 v.AddArg(x) 59415 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 59416 v0.AuxInt = 0 59417 v0.AddArg(check) 59418 v.AddArg(v0) 59419 return true 59420 } 59421 return false 59422 } 59423 func rewriteValueAMD64_OpConst16_0(v *Value) bool { 59424 // match: (Const16 [val]) 59425 // cond: 59426 // result: (MOVLconst [val]) 59427 for { 59428 val := v.AuxInt 59429 v.reset(OpAMD64MOVLconst) 59430 v.AuxInt = val 59431 return true 59432 } 59433 } 59434 func rewriteValueAMD64_OpConst32_0(v *Value) bool { 59435 // match: (Const32 [val]) 59436 // cond: 59437 // result: (MOVLconst [val]) 59438 for { 59439 val := v.AuxInt 59440 v.reset(OpAMD64MOVLconst) 59441 v.AuxInt = val 59442 return true 59443 } 59444 } 59445 func rewriteValueAMD64_OpConst32F_0(v *Value) bool { 59446 // match: (Const32F [val]) 59447 // cond: 59448 // result: 
(MOVSSconst [val]) 59449 for { 59450 val := v.AuxInt 59451 v.reset(OpAMD64MOVSSconst) 59452 v.AuxInt = val 59453 return true 59454 } 59455 } 59456 func rewriteValueAMD64_OpConst64_0(v *Value) bool { 59457 // match: (Const64 [val]) 59458 // cond: 59459 // result: (MOVQconst [val]) 59460 for { 59461 val := v.AuxInt 59462 v.reset(OpAMD64MOVQconst) 59463 v.AuxInt = val 59464 return true 59465 } 59466 } 59467 func rewriteValueAMD64_OpConst64F_0(v *Value) bool { 59468 // match: (Const64F [val]) 59469 // cond: 59470 // result: (MOVSDconst [val]) 59471 for { 59472 val := v.AuxInt 59473 v.reset(OpAMD64MOVSDconst) 59474 v.AuxInt = val 59475 return true 59476 } 59477 } 59478 func rewriteValueAMD64_OpConst8_0(v *Value) bool { 59479 // match: (Const8 [val]) 59480 // cond: 59481 // result: (MOVLconst [val]) 59482 for { 59483 val := v.AuxInt 59484 v.reset(OpAMD64MOVLconst) 59485 v.AuxInt = val 59486 return true 59487 } 59488 } 59489 func rewriteValueAMD64_OpConstBool_0(v *Value) bool { 59490 // match: (ConstBool [b]) 59491 // cond: 59492 // result: (MOVLconst [b]) 59493 for { 59494 b := v.AuxInt 59495 v.reset(OpAMD64MOVLconst) 59496 v.AuxInt = b 59497 return true 59498 } 59499 } 59500 func rewriteValueAMD64_OpConstNil_0(v *Value) bool { 59501 b := v.Block 59502 _ = b 59503 config := b.Func.Config 59504 _ = config 59505 // match: (ConstNil) 59506 // cond: config.PtrSize == 8 59507 // result: (MOVQconst [0]) 59508 for { 59509 if !(config.PtrSize == 8) { 59510 break 59511 } 59512 v.reset(OpAMD64MOVQconst) 59513 v.AuxInt = 0 59514 return true 59515 } 59516 // match: (ConstNil) 59517 // cond: config.PtrSize == 4 59518 // result: (MOVLconst [0]) 59519 for { 59520 if !(config.PtrSize == 4) { 59521 break 59522 } 59523 v.reset(OpAMD64MOVLconst) 59524 v.AuxInt = 0 59525 return true 59526 } 59527 return false 59528 } 59529 func rewriteValueAMD64_OpCtz16_0(v *Value) bool { 59530 b := v.Block 59531 _ = b 59532 typ := &b.Func.Config.Types 59533 _ = typ 59534 // match: (Ctz16 x) 59535 // cond: 59536 // result: (BSFL (BTSLconst <typ.UInt32> [16] x)) 59537 for { 59538 x := v.Args[0] 59539 v.reset(OpAMD64BSFL) 59540 v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32) 59541 v0.AuxInt = 16 59542 v0.AddArg(x) 59543 v.AddArg(v0) 59544 return true 59545 } 59546 } 59547 func rewriteValueAMD64_OpCtz16NonZero_0(v *Value) bool { 59548 // match: (Ctz16NonZero x) 59549 // cond: 59550 // result: (BSFL x) 59551 for { 59552 x := v.Args[0] 59553 v.reset(OpAMD64BSFL) 59554 v.AddArg(x) 59555 return true 59556 } 59557 } 59558 func rewriteValueAMD64_OpCtz32_0(v *Value) bool { 59559 b := v.Block 59560 _ = b 59561 typ := &b.Func.Config.Types 59562 _ = typ 59563 // match: (Ctz32 x) 59564 // cond: 59565 // result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x))) 59566 for { 59567 x := v.Args[0] 59568 v.reset(OpSelect0) 59569 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 59570 v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) 59571 v1.AuxInt = 32 59572 v1.AddArg(x) 59573 v0.AddArg(v1) 59574 v.AddArg(v0) 59575 return true 59576 } 59577 } 59578 func rewriteValueAMD64_OpCtz32NonZero_0(v *Value) bool { 59579 // match: (Ctz32NonZero x) 59580 // cond: 59581 // result: (BSFL x) 59582 for { 59583 x := v.Args[0] 59584 v.reset(OpAMD64BSFL) 59585 v.AddArg(x) 59586 return true 59587 } 59588 } 59589 func rewriteValueAMD64_OpCtz64_0(v *Value) bool { 59590 b := v.Block 59591 _ = b 59592 typ := &b.Func.Config.Types 59593 _ = typ 59594 // match: (Ctz64 <t> x) 59595 // cond: 59596 // result: (CMOVQEQ (Select0 <t> (BSFQ 
x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x))) 59597 for { 59598 t := v.Type 59599 x := v.Args[0] 59600 v.reset(OpAMD64CMOVQEQ) 59601 v0 := b.NewValue0(v.Pos, OpSelect0, t) 59602 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 59603 v1.AddArg(x) 59604 v0.AddArg(v1) 59605 v.AddArg(v0) 59606 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 59607 v2.AuxInt = 64 59608 v.AddArg(v2) 59609 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 59610 v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 59611 v4.AddArg(x) 59612 v3.AddArg(v4) 59613 v.AddArg(v3) 59614 return true 59615 } 59616 } 59617 func rewriteValueAMD64_OpCtz64NonZero_0(v *Value) bool { 59618 b := v.Block 59619 _ = b 59620 typ := &b.Func.Config.Types 59621 _ = typ 59622 // match: (Ctz64NonZero x) 59623 // cond: 59624 // result: (Select0 (BSFQ x)) 59625 for { 59626 x := v.Args[0] 59627 v.reset(OpSelect0) 59628 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 59629 v0.AddArg(x) 59630 v.AddArg(v0) 59631 return true 59632 } 59633 } 59634 func rewriteValueAMD64_OpCtz8_0(v *Value) bool { 59635 b := v.Block 59636 _ = b 59637 typ := &b.Func.Config.Types 59638 _ = typ 59639 // match: (Ctz8 x) 59640 // cond: 59641 // result: (BSFL (BTSLconst <typ.UInt32> [ 8] x)) 59642 for { 59643 x := v.Args[0] 59644 v.reset(OpAMD64BSFL) 59645 v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32) 59646 v0.AuxInt = 8 59647 v0.AddArg(x) 59648 v.AddArg(v0) 59649 return true 59650 } 59651 } 59652 func rewriteValueAMD64_OpCtz8NonZero_0(v *Value) bool { 59653 // match: (Ctz8NonZero x) 59654 // cond: 59655 // result: (BSFL x) 59656 for { 59657 x := v.Args[0] 59658 v.reset(OpAMD64BSFL) 59659 v.AddArg(x) 59660 return true 59661 } 59662 } 59663 func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool { 59664 // match: (Cvt32Fto32 x) 59665 // cond: 59666 // result: (CVTTSS2SL x) 59667 for { 59668 x := v.Args[0] 59669 v.reset(OpAMD64CVTTSS2SL) 59670 v.AddArg(x) 59671 return true 59672 } 59673 } 59674 func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool { 59675 // match: (Cvt32Fto64 x) 59676 // cond: 59677 // result: (CVTTSS2SQ x) 59678 for { 59679 x := v.Args[0] 59680 v.reset(OpAMD64CVTTSS2SQ) 59681 v.AddArg(x) 59682 return true 59683 } 59684 } 59685 func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool { 59686 // match: (Cvt32Fto64F x) 59687 // cond: 59688 // result: (CVTSS2SD x) 59689 for { 59690 x := v.Args[0] 59691 v.reset(OpAMD64CVTSS2SD) 59692 v.AddArg(x) 59693 return true 59694 } 59695 } 59696 func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool { 59697 // match: (Cvt32to32F x) 59698 // cond: 59699 // result: (CVTSL2SS x) 59700 for { 59701 x := v.Args[0] 59702 v.reset(OpAMD64CVTSL2SS) 59703 v.AddArg(x) 59704 return true 59705 } 59706 } 59707 func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool { 59708 // match: (Cvt32to64F x) 59709 // cond: 59710 // result: (CVTSL2SD x) 59711 for { 59712 x := v.Args[0] 59713 v.reset(OpAMD64CVTSL2SD) 59714 v.AddArg(x) 59715 return true 59716 } 59717 } 59718 func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool { 59719 // match: (Cvt64Fto32 x) 59720 // cond: 59721 // result: (CVTTSD2SL x) 59722 for { 59723 x := v.Args[0] 59724 v.reset(OpAMD64CVTTSD2SL) 59725 v.AddArg(x) 59726 return true 59727 } 59728 } 59729 func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool { 59730 // match: (Cvt64Fto32F x) 59731 // cond: 59732 // result: (CVTSD2SS x) 59733 for { 59734 x := v.Args[0] 59735 v.reset(OpAMD64CVTSD2SS) 59736 v.AddArg(x) 
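// NOTE (editorial aside, not part of the generated output): BSF leaves its
// destination undefined when the source is zero, so the Ctz rules above are
// careful never to hand it a possibly-zero value. Ctz8 and Ctz16 first set a
// sentinel bit just past the top of the operand (BTSLconst [ 8] / [16]), which
// both guarantees a nonzero input and makes the zero case come out as 8 or 16;
// Ctz32 plants the sentinel at bit 32 of a 64-bit register; Ctz64 has no spare
// bit, so it keeps BSFQ's flags result and uses CMOVQEQ to substitute the
// constant 64 when ZF reports a zero input. The *NonZero variants skip all of
// this because the caller guarantees x != 0. The earlier BitLen rules dodge the
// same hazard in BSR: LEAL1/LEAQ1 [1] x x computes 2*x+1, which is never zero
// and whose bit-scan-reverse index equals BitLen(x). A scalar model of the
// Ctz16 lowering (sketch):
//
//	func ctz16(x uint16) int {
//		v := uint32(x) | 1<<16 // BTSLconst [16]: plant the sentinel bit
//		n := 0
//		for v&1 == 0 { // BSFL: index of the lowest set bit
//			v >>= 1
//			n++
//		}
//		return n // 16 when x == 0, matching math/bits.TrailingZeros16
//	}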
59737 return true 59738 } 59739 } 59740 func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool { 59741 // match: (Cvt64Fto64 x) 59742 // cond: 59743 // result: (CVTTSD2SQ x) 59744 for { 59745 x := v.Args[0] 59746 v.reset(OpAMD64CVTTSD2SQ) 59747 v.AddArg(x) 59748 return true 59749 } 59750 } 59751 func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool { 59752 // match: (Cvt64to32F x) 59753 // cond: 59754 // result: (CVTSQ2SS x) 59755 for { 59756 x := v.Args[0] 59757 v.reset(OpAMD64CVTSQ2SS) 59758 v.AddArg(x) 59759 return true 59760 } 59761 } 59762 func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool { 59763 // match: (Cvt64to64F x) 59764 // cond: 59765 // result: (CVTSQ2SD x) 59766 for { 59767 x := v.Args[0] 59768 v.reset(OpAMD64CVTSQ2SD) 59769 v.AddArg(x) 59770 return true 59771 } 59772 } 59773 func rewriteValueAMD64_OpDiv128u_0(v *Value) bool { 59774 // match: (Div128u xhi xlo y) 59775 // cond: 59776 // result: (DIVQU2 xhi xlo y) 59777 for { 59778 _ = v.Args[2] 59779 xhi := v.Args[0] 59780 xlo := v.Args[1] 59781 y := v.Args[2] 59782 v.reset(OpAMD64DIVQU2) 59783 v.AddArg(xhi) 59784 v.AddArg(xlo) 59785 v.AddArg(y) 59786 return true 59787 } 59788 } 59789 func rewriteValueAMD64_OpDiv16_0(v *Value) bool { 59790 b := v.Block 59791 _ = b 59792 typ := &b.Func.Config.Types 59793 _ = typ 59794 // match: (Div16 [a] x y) 59795 // cond: 59796 // result: (Select0 (DIVW [a] x y)) 59797 for { 59798 a := v.AuxInt 59799 _ = v.Args[1] 59800 x := v.Args[0] 59801 y := v.Args[1] 59802 v.reset(OpSelect0) 59803 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 59804 v0.AuxInt = a 59805 v0.AddArg(x) 59806 v0.AddArg(y) 59807 v.AddArg(v0) 59808 return true 59809 } 59810 } 59811 func rewriteValueAMD64_OpDiv16u_0(v *Value) bool { 59812 b := v.Block 59813 _ = b 59814 typ := &b.Func.Config.Types 59815 _ = typ 59816 // match: (Div16u x y) 59817 // cond: 59818 // result: (Select0 (DIVWU x y)) 59819 for { 59820 _ = v.Args[1] 59821 x := v.Args[0] 59822 y := v.Args[1] 59823 v.reset(OpSelect0) 59824 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 59825 v0.AddArg(x) 59826 v0.AddArg(y) 59827 v.AddArg(v0) 59828 return true 59829 } 59830 } 59831 func rewriteValueAMD64_OpDiv32_0(v *Value) bool { 59832 b := v.Block 59833 _ = b 59834 typ := &b.Func.Config.Types 59835 _ = typ 59836 // match: (Div32 [a] x y) 59837 // cond: 59838 // result: (Select0 (DIVL [a] x y)) 59839 for { 59840 a := v.AuxInt 59841 _ = v.Args[1] 59842 x := v.Args[0] 59843 y := v.Args[1] 59844 v.reset(OpSelect0) 59845 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 59846 v0.AuxInt = a 59847 v0.AddArg(x) 59848 v0.AddArg(y) 59849 v.AddArg(v0) 59850 return true 59851 } 59852 } 59853 func rewriteValueAMD64_OpDiv32F_0(v *Value) bool { 59854 // match: (Div32F x y) 59855 // cond: 59856 // result: (DIVSS x y) 59857 for { 59858 _ = v.Args[1] 59859 x := v.Args[0] 59860 y := v.Args[1] 59861 v.reset(OpAMD64DIVSS) 59862 v.AddArg(x) 59863 v.AddArg(y) 59864 return true 59865 } 59866 } 59867 func rewriteValueAMD64_OpDiv32u_0(v *Value) bool { 59868 b := v.Block 59869 _ = b 59870 typ := &b.Func.Config.Types 59871 _ = typ 59872 // match: (Div32u x y) 59873 // cond: 59874 // result: (Select0 (DIVLU x y)) 59875 for { 59876 _ = v.Args[1] 59877 x := v.Args[0] 59878 y := v.Args[1] 59879 v.reset(OpSelect0) 59880 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 59881 v0.AddArg(x) 59882 v0.AddArg(y) 59883 v.AddArg(v0) 59884 return true 59885 } 59886 } 59887 func 
rewriteValueAMD64_OpDiv64_0(v *Value) bool { 59888 b := v.Block 59889 _ = b 59890 typ := &b.Func.Config.Types 59891 _ = typ 59892 // match: (Div64 [a] x y) 59893 // cond: 59894 // result: (Select0 (DIVQ [a] x y)) 59895 for { 59896 a := v.AuxInt 59897 _ = v.Args[1] 59898 x := v.Args[0] 59899 y := v.Args[1] 59900 v.reset(OpSelect0) 59901 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 59902 v0.AuxInt = a 59903 v0.AddArg(x) 59904 v0.AddArg(y) 59905 v.AddArg(v0) 59906 return true 59907 } 59908 } 59909 func rewriteValueAMD64_OpDiv64F_0(v *Value) bool { 59910 // match: (Div64F x y) 59911 // cond: 59912 // result: (DIVSD x y) 59913 for { 59914 _ = v.Args[1] 59915 x := v.Args[0] 59916 y := v.Args[1] 59917 v.reset(OpAMD64DIVSD) 59918 v.AddArg(x) 59919 v.AddArg(y) 59920 return true 59921 } 59922 } 59923 func rewriteValueAMD64_OpDiv64u_0(v *Value) bool { 59924 b := v.Block 59925 _ = b 59926 typ := &b.Func.Config.Types 59927 _ = typ 59928 // match: (Div64u x y) 59929 // cond: 59930 // result: (Select0 (DIVQU x y)) 59931 for { 59932 _ = v.Args[1] 59933 x := v.Args[0] 59934 y := v.Args[1] 59935 v.reset(OpSelect0) 59936 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 59937 v0.AddArg(x) 59938 v0.AddArg(y) 59939 v.AddArg(v0) 59940 return true 59941 } 59942 } 59943 func rewriteValueAMD64_OpDiv8_0(v *Value) bool { 59944 b := v.Block 59945 _ = b 59946 typ := &b.Func.Config.Types 59947 _ = typ 59948 // match: (Div8 x y) 59949 // cond: 59950 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 59951 for { 59952 _ = v.Args[1] 59953 x := v.Args[0] 59954 y := v.Args[1] 59955 v.reset(OpSelect0) 59956 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 59957 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 59958 v1.AddArg(x) 59959 v0.AddArg(v1) 59960 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 59961 v2.AddArg(y) 59962 v0.AddArg(v2) 59963 v.AddArg(v0) 59964 return true 59965 } 59966 } 59967 func rewriteValueAMD64_OpDiv8u_0(v *Value) bool { 59968 b := v.Block 59969 _ = b 59970 typ := &b.Func.Config.Types 59971 _ = typ 59972 // match: (Div8u x y) 59973 // cond: 59974 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 59975 for { 59976 _ = v.Args[1] 59977 x := v.Args[0] 59978 y := v.Args[1] 59979 v.reset(OpSelect0) 59980 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 59981 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 59982 v1.AddArg(x) 59983 v0.AddArg(v1) 59984 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 59985 v2.AddArg(y) 59986 v0.AddArg(v2) 59987 v.AddArg(v0) 59988 return true 59989 } 59990 } 59991 func rewriteValueAMD64_OpEq16_0(v *Value) bool { 59992 b := v.Block 59993 _ = b 59994 // match: (Eq16 x y) 59995 // cond: 59996 // result: (SETEQ (CMPW x y)) 59997 for { 59998 _ = v.Args[1] 59999 x := v.Args[0] 60000 y := v.Args[1] 60001 v.reset(OpAMD64SETEQ) 60002 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 60003 v0.AddArg(x) 60004 v0.AddArg(y) 60005 v.AddArg(v0) 60006 return true 60007 } 60008 } 60009 func rewriteValueAMD64_OpEq32_0(v *Value) bool { 60010 b := v.Block 60011 _ = b 60012 // match: (Eq32 x y) 60013 // cond: 60014 // result: (SETEQ (CMPL x y)) 60015 for { 60016 _ = v.Args[1] 60017 x := v.Args[0] 60018 y := v.Args[1] 60019 v.reset(OpAMD64SETEQ) 60020 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 60021 v0.AddArg(x) 60022 v0.AddArg(y) 60023 v.AddArg(v0) 60024 return true 60025 } 60026 } 60027 func rewriteValueAMD64_OpEq32F_0(v 
*Value) bool { 60028 b := v.Block 60029 _ = b 60030 // match: (Eq32F x y) 60031 // cond: 60032 // result: (SETEQF (UCOMISS x y)) 60033 for { 60034 _ = v.Args[1] 60035 x := v.Args[0] 60036 y := v.Args[1] 60037 v.reset(OpAMD64SETEQF) 60038 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 60039 v0.AddArg(x) 60040 v0.AddArg(y) 60041 v.AddArg(v0) 60042 return true 60043 } 60044 } 60045 func rewriteValueAMD64_OpEq64_0(v *Value) bool { 60046 b := v.Block 60047 _ = b 60048 // match: (Eq64 x y) 60049 // cond: 60050 // result: (SETEQ (CMPQ x y)) 60051 for { 60052 _ = v.Args[1] 60053 x := v.Args[0] 60054 y := v.Args[1] 60055 v.reset(OpAMD64SETEQ) 60056 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 60057 v0.AddArg(x) 60058 v0.AddArg(y) 60059 v.AddArg(v0) 60060 return true 60061 } 60062 } 60063 func rewriteValueAMD64_OpEq64F_0(v *Value) bool { 60064 b := v.Block 60065 _ = b 60066 // match: (Eq64F x y) 60067 // cond: 60068 // result: (SETEQF (UCOMISD x y)) 60069 for { 60070 _ = v.Args[1] 60071 x := v.Args[0] 60072 y := v.Args[1] 60073 v.reset(OpAMD64SETEQF) 60074 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 60075 v0.AddArg(x) 60076 v0.AddArg(y) 60077 v.AddArg(v0) 60078 return true 60079 } 60080 } 60081 func rewriteValueAMD64_OpEq8_0(v *Value) bool { 60082 b := v.Block 60083 _ = b 60084 // match: (Eq8 x y) 60085 // cond: 60086 // result: (SETEQ (CMPB x y)) 60087 for { 60088 _ = v.Args[1] 60089 x := v.Args[0] 60090 y := v.Args[1] 60091 v.reset(OpAMD64SETEQ) 60092 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 60093 v0.AddArg(x) 60094 v0.AddArg(y) 60095 v.AddArg(v0) 60096 return true 60097 } 60098 } 60099 func rewriteValueAMD64_OpEqB_0(v *Value) bool { 60100 b := v.Block 60101 _ = b 60102 // match: (EqB x y) 60103 // cond: 60104 // result: (SETEQ (CMPB x y)) 60105 for { 60106 _ = v.Args[1] 60107 x := v.Args[0] 60108 y := v.Args[1] 60109 v.reset(OpAMD64SETEQ) 60110 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 60111 v0.AddArg(x) 60112 v0.AddArg(y) 60113 v.AddArg(v0) 60114 return true 60115 } 60116 } 60117 func rewriteValueAMD64_OpEqPtr_0(v *Value) bool { 60118 b := v.Block 60119 _ = b 60120 config := b.Func.Config 60121 _ = config 60122 // match: (EqPtr x y) 60123 // cond: config.PtrSize == 8 60124 // result: (SETEQ (CMPQ x y)) 60125 for { 60126 _ = v.Args[1] 60127 x := v.Args[0] 60128 y := v.Args[1] 60129 if !(config.PtrSize == 8) { 60130 break 60131 } 60132 v.reset(OpAMD64SETEQ) 60133 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 60134 v0.AddArg(x) 60135 v0.AddArg(y) 60136 v.AddArg(v0) 60137 return true 60138 } 60139 // match: (EqPtr x y) 60140 // cond: config.PtrSize == 4 60141 // result: (SETEQ (CMPL x y)) 60142 for { 60143 _ = v.Args[1] 60144 x := v.Args[0] 60145 y := v.Args[1] 60146 if !(config.PtrSize == 4) { 60147 break 60148 } 60149 v.reset(OpAMD64SETEQ) 60150 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 60151 v0.AddArg(x) 60152 v0.AddArg(y) 60153 v.AddArg(v0) 60154 return true 60155 } 60156 return false 60157 } 60158 func rewriteValueAMD64_OpFloor_0(v *Value) bool { 60159 // match: (Floor x) 60160 // cond: 60161 // result: (ROUNDSD [1] x) 60162 for { 60163 x := v.Args[0] 60164 v.reset(OpAMD64ROUNDSD) 60165 v.AuxInt = 1 60166 v.AddArg(x) 60167 return true 60168 } 60169 } 60170 func rewriteValueAMD64_OpGeq16_0(v *Value) bool { 60171 b := v.Block 60172 _ = b 60173 // match: (Geq16 x y) 60174 // cond: 60175 // result: (SETGE (CMPW x y)) 60176 for { 60177 _ = v.Args[1] 60178 x := v.Args[0] 60179 y := v.Args[1] 60180 
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
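// GetCallerPC, GetCallerSP, GetClosurePtr, and GetG are runtime
// intrinsics; each maps one-to-one onto a Lowered* pseudo-op that the
// AMD64 back end expands during final code generation.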
func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool {
	// match: (GetCallerPC)
	// cond:
	// result: (LoweredGetCallerPC)
	for {
		v.reset(OpAMD64LoweredGetCallerPC)
		return true
	}
}
func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool {
	// match: (GetCallerSP)
	// cond:
	// result: (LoweredGetCallerSP)
	for {
		v.reset(OpAMD64LoweredGetCallerSP)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
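// Hmul* yields the high half of the double-width product. The HMULx ops
// model the one-operand MUL/IMUL instructions, which leave that high
// half in the DX register.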
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[1]
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
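// Bounds checks compile to unsigned comparisons: idx < len uses SETB and
// the slice form idx <= len uses SETBE, so a negative index, seen as a
// huge unsigned value, fails the check automatically. The PtrSize
// condition picks 64- or 32-bit compares.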
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETB (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETB (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETBE (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETBE (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
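// Note the operand swap in the floating-point orderings below: x <= y
// becomes (SETGEF (UCOMISS y x)). UCOMIS* sets the flags like an
// unsigned compare, and a NaN operand sets ZF, PF, and CF together;
// testing the swapped operands with the above/above-or-equal style codes
// makes every ordered comparison involving NaN come out false, as Go
// requires.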
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
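// Load dispatches purely on the value's type: 64-bit ints (and pointers
// on 64-bit targets) become MOVQload, 32-bit ints MOVLload, and so on
// down to MOVBload for booleans and 8-bit ints, with MOVSSload/MOVSDload
// for the two float widths.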
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLocalAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (LocalAddr {sym} base _)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		_ = v.Args[1]
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (LocalAddr {sym} base _)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		_ = v.Args[1]
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
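// The shift lowerings below implement Go's shift semantics: a count at
// least as large as the operand width must produce 0. When the count is
// not provably bounded, the generated pattern is
//
//	(ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
//
// The hardware shift wraps the count mod 32 (or 64), so the result is
// ANDed with a mask: CMPxconst y [32] sets the carry flag iff y < 32,
// and SBBLcarrymask turns that into all ones (count in range) or all
// zeros (count out of range). The 8- and 16-bit shifts compare against
// 32, not their own width, because they run in 32-bit registers and any
// count of 16 or more already clears the bits that matter. When
// shiftIsBounded proves the count is in range, a bare SHLL/SHLQ is
// emitted instead.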
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
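// The div and mod lowerings share one machine op: DIVW/DIVL/DIVQ and
// their unsigned forms produce a (quotient, remainder) tuple. The Div*
// rules earlier in this file take Select0 of that tuple; the Mod* rules
// below take Select1. The 8-bit cases first widen to 16 bits,
// sidestepping the AH-register form of the x86 8-bit divide.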
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16 [a] x y)
	// cond:
	// result: (Select1 (DIVW [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32 [a] x y)
	// cond:
	// result: (Select1 (DIVL [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64 [a] x y)
	// cond:
	// result: (Select1 (DIVQ [a] x y))
	for {
		a := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
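// Move lowers small copies to plain load/store pairs at the widest size
// that fits (1, 2, 4, 8, or 16 bytes with SSE) and splits 32-, 48-, and
// 64-byte copies into two such moves; odd sizes and larger blocks fall
// through to rewriteValueAMD64_OpMove_10 below.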
func rewriteValueAMD64_OpMove_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [0] _ _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[2]
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// cond:
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// cond:
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// cond:
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// cond:
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: config.useSSE
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: !config.useSSE
	// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [32] dst src mem)
	// cond:
	// result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpMove)
		v.AuxInt = 16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = 16
		v2.AddArg(dst)
		v2.AddArg(src)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [48] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if v.AuxInt != 48 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = 32
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = 16
		v2.AddArg(dst)
		v2.AddArg(src)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [64] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
	for {
		if v.AuxInt != 64 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = 32
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = 32
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = 32
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = 32
		v2.AddArg(dst)
		v2.AddArg(src)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	return false
}
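// The remaining Move cases handle everything else: odd small sizes use
// overlapping loads and stores (e.g. [7] is two 4-byte copies that
// overlap by one byte); for larger unaligned sizes an 8- or 16-byte head
// copy absorbs the misalignment and the rules recurse on the aligned
// remainder; aligned blocks up to 16*64 bytes use Duff's device; and
// anything bigger (or any multiple of 8 when Duff's device is disabled)
// becomes REPMOVSQ.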
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
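// Multiplication needs no widening tricks: the low bits of a product do
// not depend on the high bits of the inputs, so Mul8 and Mul16 simply
// reuse the 32-bit MULL. Mul64uhilo, used for 128-bit arithmetic, maps
// to MULQU2, which produces both halves of the full product.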
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
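// Integer negation is NEGL/NEGQ, but SSE has no float negate
// instruction: Neg32F and Neg64F instead flip the sign bit by XORing
// with a -0.0 constant of the matching width.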
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
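	// Neq64 compares full quadwords and materializes the flag with SETNE, the
	// 64-bit counterpart of the narrower Neq lowerings above.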
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft16_0(v *Value) bool {
	// match: (RotateLeft16 a b)
	// cond:
	// result: (ROLW a b)
	for {
		_ = v.Args[1]
		a := v.Args[0]
		b := v.Args[1]
		v.reset(OpAMD64ROLW)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft32_0(v *Value) bool {
	// match: (RotateLeft32 a b)
	// cond:
	// result: (ROLL a b)
	for {
		_ = v.Args[1]
		a := v.Args[0]
		b := v.Args[1]
		v.reset(OpAMD64ROLL)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft64_0(v *Value) bool {
	// match: (RotateLeft64 a b)
	// cond:
	// result: (ROLQ a b)
	for {
		_ = v.Args[1]
		a := v.Args[0]
		b := v.Args[1]
		v.reset(OpAMD64ROLQ)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft8_0(v *Value) bool {
	// match: (RotateLeft8 a b)
	// cond:
	// result: (ROLB a b)
	for {
		_ = v.Args[1]
		a := v.Args[0]
		b := v.Args[1]
		v.reset(OpAMD64ROLB)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool {
	// match: (RoundToEven x)
	// cond:
	// result: (ROUNDSD [0] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
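	// CMPLconst y [16] sets the carry flag exactly when the count is below 16,
	// and SBBLcarrymask turns that flag into an all-ones or all-zero mask, so
	// the ANDL keeps the SHRW result for in-range counts and yields 0 for
	// oversized ones, matching Go's unsigned shift semantics.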
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh32Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh32Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
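		// v1 is all ones exactly when y < 32; ANDing it into the SHRL result
		// zeroes out-of-range shift counts.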
		v.AddArg(v1)
		return true
	}
	// match: (Rsh32Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh32Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
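	// Signed right shifts cannot simply be masked to zero like the unsigned
	// cases above: an oversized count has to fill the result with sign bits.
	// OR-ing y with the inverted bounds mask drives the count to all ones
	// whenever it is out of range, and SAR then produces pure sign bits,
	// matching Go's semantics for oversized signed shifts.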
64388 b := v.Block 64389 _ = b 64390 // match: (Rsh64x8 <t> x y) 64391 // cond: !shiftIsBounded(v) 64392 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 64393 for { 64394 t := v.Type 64395 _ = v.Args[1] 64396 x := v.Args[0] 64397 y := v.Args[1] 64398 if !(!shiftIsBounded(v)) { 64399 break 64400 } 64401 v.reset(OpAMD64SARQ) 64402 v.Type = t 64403 v.AddArg(x) 64404 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64405 v0.AddArg(y) 64406 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64407 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64408 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 64409 v3.AuxInt = 64 64410 v3.AddArg(y) 64411 v2.AddArg(v3) 64412 v1.AddArg(v2) 64413 v0.AddArg(v1) 64414 v.AddArg(v0) 64415 return true 64416 } 64417 // match: (Rsh64x8 x y) 64418 // cond: shiftIsBounded(v) 64419 // result: (SARQ x y) 64420 for { 64421 _ = v.Args[1] 64422 x := v.Args[0] 64423 y := v.Args[1] 64424 if !(shiftIsBounded(v)) { 64425 break 64426 } 64427 v.reset(OpAMD64SARQ) 64428 v.AddArg(x) 64429 v.AddArg(y) 64430 return true 64431 } 64432 return false 64433 } 64434 func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { 64435 b := v.Block 64436 _ = b 64437 // match: (Rsh8Ux16 <t> x y) 64438 // cond: !shiftIsBounded(v) 64439 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 64440 for { 64441 t := v.Type 64442 _ = v.Args[1] 64443 x := v.Args[0] 64444 y := v.Args[1] 64445 if !(!shiftIsBounded(v)) { 64446 break 64447 } 64448 v.reset(OpAMD64ANDL) 64449 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 64450 v0.AddArg(x) 64451 v0.AddArg(y) 64452 v.AddArg(v0) 64453 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64454 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 64455 v2.AuxInt = 8 64456 v2.AddArg(y) 64457 v1.AddArg(v2) 64458 v.AddArg(v1) 64459 return true 64460 } 64461 // match: (Rsh8Ux16 x y) 64462 // cond: shiftIsBounded(v) 64463 // result: (SHRB x y) 64464 for { 64465 _ = v.Args[1] 64466 x := v.Args[0] 64467 y := v.Args[1] 64468 if !(shiftIsBounded(v)) { 64469 break 64470 } 64471 v.reset(OpAMD64SHRB) 64472 v.AddArg(x) 64473 v.AddArg(y) 64474 return true 64475 } 64476 return false 64477 } 64478 func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { 64479 b := v.Block 64480 _ = b 64481 // match: (Rsh8Ux32 <t> x y) 64482 // cond: !shiftIsBounded(v) 64483 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 64484 for { 64485 t := v.Type 64486 _ = v.Args[1] 64487 x := v.Args[0] 64488 y := v.Args[1] 64489 if !(!shiftIsBounded(v)) { 64490 break 64491 } 64492 v.reset(OpAMD64ANDL) 64493 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 64494 v0.AddArg(x) 64495 v0.AddArg(y) 64496 v.AddArg(v0) 64497 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64498 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 64499 v2.AuxInt = 8 64500 v2.AddArg(y) 64501 v1.AddArg(v2) 64502 v.AddArg(v1) 64503 return true 64504 } 64505 // match: (Rsh8Ux32 x y) 64506 // cond: shiftIsBounded(v) 64507 // result: (SHRB x y) 64508 for { 64509 _ = v.Args[1] 64510 x := v.Args[0] 64511 y := v.Args[1] 64512 if !(shiftIsBounded(v)) { 64513 break 64514 } 64515 v.reset(OpAMD64SHRB) 64516 v.AddArg(x) 64517 v.AddArg(y) 64518 return true 64519 } 64520 return false 64521 } 64522 func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { 64523 b := v.Block 64524 _ = b 64525 // match: (Rsh8Ux64 <t> x y) 64526 // cond: !shiftIsBounded(v) 64527 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 64528 for { 64529 t := v.Type 
64530 _ = v.Args[1] 64531 x := v.Args[0] 64532 y := v.Args[1] 64533 if !(!shiftIsBounded(v)) { 64534 break 64535 } 64536 v.reset(OpAMD64ANDL) 64537 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 64538 v0.AddArg(x) 64539 v0.AddArg(y) 64540 v.AddArg(v0) 64541 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64542 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 64543 v2.AuxInt = 8 64544 v2.AddArg(y) 64545 v1.AddArg(v2) 64546 v.AddArg(v1) 64547 return true 64548 } 64549 // match: (Rsh8Ux64 x y) 64550 // cond: shiftIsBounded(v) 64551 // result: (SHRB x y) 64552 for { 64553 _ = v.Args[1] 64554 x := v.Args[0] 64555 y := v.Args[1] 64556 if !(shiftIsBounded(v)) { 64557 break 64558 } 64559 v.reset(OpAMD64SHRB) 64560 v.AddArg(x) 64561 v.AddArg(y) 64562 return true 64563 } 64564 return false 64565 } 64566 func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { 64567 b := v.Block 64568 _ = b 64569 // match: (Rsh8Ux8 <t> x y) 64570 // cond: !shiftIsBounded(v) 64571 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 64572 for { 64573 t := v.Type 64574 _ = v.Args[1] 64575 x := v.Args[0] 64576 y := v.Args[1] 64577 if !(!shiftIsBounded(v)) { 64578 break 64579 } 64580 v.reset(OpAMD64ANDL) 64581 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 64582 v0.AddArg(x) 64583 v0.AddArg(y) 64584 v.AddArg(v0) 64585 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 64586 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 64587 v2.AuxInt = 8 64588 v2.AddArg(y) 64589 v1.AddArg(v2) 64590 v.AddArg(v1) 64591 return true 64592 } 64593 // match: (Rsh8Ux8 x y) 64594 // cond: shiftIsBounded(v) 64595 // result: (SHRB x y) 64596 for { 64597 _ = v.Args[1] 64598 x := v.Args[0] 64599 y := v.Args[1] 64600 if !(shiftIsBounded(v)) { 64601 break 64602 } 64603 v.reset(OpAMD64SHRB) 64604 v.AddArg(x) 64605 v.AddArg(y) 64606 return true 64607 } 64608 return false 64609 } 64610 func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool { 64611 b := v.Block 64612 _ = b 64613 // match: (Rsh8x16 <t> x y) 64614 // cond: !shiftIsBounded(v) 64615 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 64616 for { 64617 t := v.Type 64618 _ = v.Args[1] 64619 x := v.Args[0] 64620 y := v.Args[1] 64621 if !(!shiftIsBounded(v)) { 64622 break 64623 } 64624 v.reset(OpAMD64SARB) 64625 v.Type = t 64626 v.AddArg(x) 64627 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64628 v0.AddArg(y) 64629 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64630 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64631 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 64632 v3.AuxInt = 8 64633 v3.AddArg(y) 64634 v2.AddArg(v3) 64635 v1.AddArg(v2) 64636 v0.AddArg(v1) 64637 v.AddArg(v0) 64638 return true 64639 } 64640 // match: (Rsh8x16 x y) 64641 // cond: shiftIsBounded(v) 64642 // result: (SARB x y) 64643 for { 64644 _ = v.Args[1] 64645 x := v.Args[0] 64646 y := v.Args[1] 64647 if !(shiftIsBounded(v)) { 64648 break 64649 } 64650 v.reset(OpAMD64SARB) 64651 v.AddArg(x) 64652 v.AddArg(y) 64653 return true 64654 } 64655 return false 64656 } 64657 func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { 64658 b := v.Block 64659 _ = b 64660 // match: (Rsh8x32 <t> x y) 64661 // cond: !shiftIsBounded(v) 64662 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 64663 for { 64664 t := v.Type 64665 _ = v.Args[1] 64666 x := v.Args[0] 64667 y := v.Args[1] 64668 if !(!shiftIsBounded(v)) { 64669 break 64670 } 64671 v.reset(OpAMD64SARB) 64672 v.Type = t 64673 v.AddArg(x) 64674 v0 := 
b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64675 v0.AddArg(y) 64676 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64677 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64678 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 64679 v3.AuxInt = 8 64680 v3.AddArg(y) 64681 v2.AddArg(v3) 64682 v1.AddArg(v2) 64683 v0.AddArg(v1) 64684 v.AddArg(v0) 64685 return true 64686 } 64687 // match: (Rsh8x32 x y) 64688 // cond: shiftIsBounded(v) 64689 // result: (SARB x y) 64690 for { 64691 _ = v.Args[1] 64692 x := v.Args[0] 64693 y := v.Args[1] 64694 if !(shiftIsBounded(v)) { 64695 break 64696 } 64697 v.reset(OpAMD64SARB) 64698 v.AddArg(x) 64699 v.AddArg(y) 64700 return true 64701 } 64702 return false 64703 } 64704 func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { 64705 b := v.Block 64706 _ = b 64707 // match: (Rsh8x64 <t> x y) 64708 // cond: !shiftIsBounded(v) 64709 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 64710 for { 64711 t := v.Type 64712 _ = v.Args[1] 64713 x := v.Args[0] 64714 y := v.Args[1] 64715 if !(!shiftIsBounded(v)) { 64716 break 64717 } 64718 v.reset(OpAMD64SARB) 64719 v.Type = t 64720 v.AddArg(x) 64721 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 64722 v0.AddArg(y) 64723 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 64724 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 64725 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 64726 v3.AuxInt = 8 64727 v3.AddArg(y) 64728 v2.AddArg(v3) 64729 v1.AddArg(v2) 64730 v0.AddArg(v1) 64731 v.AddArg(v0) 64732 return true 64733 } 64734 // match: (Rsh8x64 x y) 64735 // cond: shiftIsBounded(v) 64736 // result: (SARB x y) 64737 for { 64738 _ = v.Args[1] 64739 x := v.Args[0] 64740 y := v.Args[1] 64741 if !(shiftIsBounded(v)) { 64742 break 64743 } 64744 v.reset(OpAMD64SARB) 64745 v.AddArg(x) 64746 v.AddArg(y) 64747 return true 64748 } 64749 return false 64750 } 64751 func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { 64752 b := v.Block 64753 _ = b 64754 // match: (Rsh8x8 <t> x y) 64755 // cond: !shiftIsBounded(v) 64756 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 64757 for { 64758 t := v.Type 64759 _ = v.Args[1] 64760 x := v.Args[0] 64761 y := v.Args[1] 64762 if !(!shiftIsBounded(v)) { 64763 break 64764 } 64765 v.reset(OpAMD64SARB) 64766 v.Type = t 64767 v.AddArg(x) 64768 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 64769 v0.AddArg(y) 64770 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 64771 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 64772 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 64773 v3.AuxInt = 8 64774 v3.AddArg(y) 64775 v2.AddArg(v3) 64776 v1.AddArg(v2) 64777 v0.AddArg(v1) 64778 v.AddArg(v0) 64779 return true 64780 } 64781 // match: (Rsh8x8 x y) 64782 // cond: shiftIsBounded(v) 64783 // result: (SARB x y) 64784 for { 64785 _ = v.Args[1] 64786 x := v.Args[0] 64787 y := v.Args[1] 64788 if !(shiftIsBounded(v)) { 64789 break 64790 } 64791 v.reset(OpAMD64SARB) 64792 v.AddArg(x) 64793 v.AddArg(y) 64794 return true 64795 } 64796 return false 64797 } 64798 func rewriteValueAMD64_OpSelect0_0(v *Value) bool { 64799 b := v.Block 64800 _ = b 64801 typ := &b.Func.Config.Types 64802 _ = typ 64803 // match: (Select0 (Mul64uover x y)) 64804 // cond: 64805 // result: (Select0 <typ.UInt64> (MULQU x y)) 64806 for { 64807 v_0 := v.Args[0] 64808 if v_0.Op != OpMul64uover { 64809 break 64810 } 64811 _ = v_0.Args[1] 64812 x := v_0.Args[0] 64813 y := v_0.Args[1] 64814 v.reset(OpSelect0) 64815 v.Type = 
typ.UInt64 64816 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) 64817 v0.AddArg(x) 64818 v0.AddArg(y) 64819 v.AddArg(v0) 64820 return true 64821 } 64822 // match: (Select0 (Mul32uover x y)) 64823 // cond: 64824 // result: (Select0 <typ.UInt32> (MULLU x y)) 64825 for { 64826 v_0 := v.Args[0] 64827 if v_0.Op != OpMul32uover { 64828 break 64829 } 64830 _ = v_0.Args[1] 64831 x := v_0.Args[0] 64832 y := v_0.Args[1] 64833 v.reset(OpSelect0) 64834 v.Type = typ.UInt32 64835 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) 64836 v0.AddArg(x) 64837 v0.AddArg(y) 64838 v.AddArg(v0) 64839 return true 64840 } 64841 // match: (Select0 <t> (AddTupleFirst32 val tuple)) 64842 // cond: 64843 // result: (ADDL val (Select0 <t> tuple)) 64844 for { 64845 t := v.Type 64846 v_0 := v.Args[0] 64847 if v_0.Op != OpAMD64AddTupleFirst32 { 64848 break 64849 } 64850 _ = v_0.Args[1] 64851 val := v_0.Args[0] 64852 tuple := v_0.Args[1] 64853 v.reset(OpAMD64ADDL) 64854 v.AddArg(val) 64855 v0 := b.NewValue0(v.Pos, OpSelect0, t) 64856 v0.AddArg(tuple) 64857 v.AddArg(v0) 64858 return true 64859 } 64860 // match: (Select0 <t> (AddTupleFirst64 val tuple)) 64861 // cond: 64862 // result: (ADDQ val (Select0 <t> tuple)) 64863 for { 64864 t := v.Type 64865 v_0 := v.Args[0] 64866 if v_0.Op != OpAMD64AddTupleFirst64 { 64867 break 64868 } 64869 _ = v_0.Args[1] 64870 val := v_0.Args[0] 64871 tuple := v_0.Args[1] 64872 v.reset(OpAMD64ADDQ) 64873 v.AddArg(val) 64874 v0 := b.NewValue0(v.Pos, OpSelect0, t) 64875 v0.AddArg(tuple) 64876 v.AddArg(v0) 64877 return true 64878 } 64879 return false 64880 } 64881 func rewriteValueAMD64_OpSelect1_0(v *Value) bool { 64882 b := v.Block 64883 _ = b 64884 typ := &b.Func.Config.Types 64885 _ = typ 64886 // match: (Select1 (Mul64uover x y)) 64887 // cond: 64888 // result: (SETO (Select1 <types.TypeFlags> (MULQU x y))) 64889 for { 64890 v_0 := v.Args[0] 64891 if v_0.Op != OpMul64uover { 64892 break 64893 } 64894 _ = v_0.Args[1] 64895 x := v_0.Args[0] 64896 y := v_0.Args[1] 64897 v.reset(OpAMD64SETO) 64898 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 64899 v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) 64900 v1.AddArg(x) 64901 v1.AddArg(y) 64902 v0.AddArg(v1) 64903 v.AddArg(v0) 64904 return true 64905 } 64906 // match: (Select1 (Mul32uover x y)) 64907 // cond: 64908 // result: (SETO (Select1 <types.TypeFlags> (MULLU x y))) 64909 for { 64910 v_0 := v.Args[0] 64911 if v_0.Op != OpMul32uover { 64912 break 64913 } 64914 _ = v_0.Args[1] 64915 x := v_0.Args[0] 64916 y := v_0.Args[1] 64917 v.reset(OpAMD64SETO) 64918 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 64919 v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) 64920 v1.AddArg(x) 64921 v1.AddArg(y) 64922 v0.AddArg(v1) 64923 v.AddArg(v0) 64924 return true 64925 } 64926 // match: (Select1 (AddTupleFirst32 _ tuple)) 64927 // cond: 64928 // result: (Select1 tuple) 64929 for { 64930 v_0 := v.Args[0] 64931 if v_0.Op != OpAMD64AddTupleFirst32 { 64932 break 64933 } 64934 _ = v_0.Args[1] 64935 tuple := v_0.Args[1] 64936 v.reset(OpSelect1) 64937 v.AddArg(tuple) 64938 return true 64939 } 64940 // match: (Select1 (AddTupleFirst64 _ tuple)) 64941 // cond: 64942 // result: (Select1 tuple) 64943 for { 64944 v_0 := v.Args[0] 64945 if v_0.Op != OpAMD64AddTupleFirst64 { 64946 break 64947 } 64948 _ = v_0.Args[1] 64949 tuple := v_0.Args[1] 64950 v.reset(OpSelect1) 64951 v.AddArg(tuple) 64952 return true 64953 } 64954 return 
false 64955 } 64956 func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool { 64957 // match: (SignExt16to32 x) 64958 // cond: 64959 // result: (MOVWQSX x) 64960 for { 64961 x := v.Args[0] 64962 v.reset(OpAMD64MOVWQSX) 64963 v.AddArg(x) 64964 return true 64965 } 64966 } 64967 func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool { 64968 // match: (SignExt16to64 x) 64969 // cond: 64970 // result: (MOVWQSX x) 64971 for { 64972 x := v.Args[0] 64973 v.reset(OpAMD64MOVWQSX) 64974 v.AddArg(x) 64975 return true 64976 } 64977 } 64978 func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool { 64979 // match: (SignExt32to64 x) 64980 // cond: 64981 // result: (MOVLQSX x) 64982 for { 64983 x := v.Args[0] 64984 v.reset(OpAMD64MOVLQSX) 64985 v.AddArg(x) 64986 return true 64987 } 64988 } 64989 func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool { 64990 // match: (SignExt8to16 x) 64991 // cond: 64992 // result: (MOVBQSX x) 64993 for { 64994 x := v.Args[0] 64995 v.reset(OpAMD64MOVBQSX) 64996 v.AddArg(x) 64997 return true 64998 } 64999 } 65000 func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool { 65001 // match: (SignExt8to32 x) 65002 // cond: 65003 // result: (MOVBQSX x) 65004 for { 65005 x := v.Args[0] 65006 v.reset(OpAMD64MOVBQSX) 65007 v.AddArg(x) 65008 return true 65009 } 65010 } 65011 func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool { 65012 // match: (SignExt8to64 x) 65013 // cond: 65014 // result: (MOVBQSX x) 65015 for { 65016 x := v.Args[0] 65017 v.reset(OpAMD64MOVBQSX) 65018 v.AddArg(x) 65019 return true 65020 } 65021 } 65022 func rewriteValueAMD64_OpSlicemask_0(v *Value) bool { 65023 b := v.Block 65024 _ = b 65025 // match: (Slicemask <t> x) 65026 // cond: 65027 // result: (SARQconst (NEGQ <t> x) [63]) 65028 for { 65029 t := v.Type 65030 x := v.Args[0] 65031 v.reset(OpAMD64SARQconst) 65032 v.AuxInt = 63 65033 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 65034 v0.AddArg(x) 65035 v.AddArg(v0) 65036 return true 65037 } 65038 } 65039 func rewriteValueAMD64_OpSqrt_0(v *Value) bool { 65040 // match: (Sqrt x) 65041 // cond: 65042 // result: (SQRTSD x) 65043 for { 65044 x := v.Args[0] 65045 v.reset(OpAMD64SQRTSD) 65046 v.AddArg(x) 65047 return true 65048 } 65049 } 65050 func rewriteValueAMD64_OpStaticCall_0(v *Value) bool { 65051 // match: (StaticCall [argwid] {target} mem) 65052 // cond: 65053 // result: (CALLstatic [argwid] {target} mem) 65054 for { 65055 argwid := v.AuxInt 65056 target := v.Aux 65057 mem := v.Args[0] 65058 v.reset(OpAMD64CALLstatic) 65059 v.AuxInt = argwid 65060 v.Aux = target 65061 v.AddArg(mem) 65062 return true 65063 } 65064 } 65065 func rewriteValueAMD64_OpStore_0(v *Value) bool { 65066 // match: (Store {t} ptr val mem) 65067 // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) 65068 // result: (MOVSDstore ptr val mem) 65069 for { 65070 t := v.Aux 65071 _ = v.Args[2] 65072 ptr := v.Args[0] 65073 val := v.Args[1] 65074 mem := v.Args[2] 65075 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { 65076 break 65077 } 65078 v.reset(OpAMD64MOVSDstore) 65079 v.AddArg(ptr) 65080 v.AddArg(val) 65081 v.AddArg(mem) 65082 return true 65083 } 65084 // match: (Store {t} ptr val mem) 65085 // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) 65086 // result: (MOVSSstore ptr val mem) 65087 for { 65088 t := v.Aux 65089 _ = v.Args[2] 65090 ptr := v.Args[0] 65091 val := v.Args[1] 65092 mem := v.Args[2] 65093 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { 65094 break 65095 } 65096 v.reset(OpAMD64MOVSSstore) 65097 v.AddArg(ptr) 65098 v.AddArg(val) 
func rewriteValueAMD64_OpStore_0(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSub16_0(v *Value) bool {
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32_0(v *Value) bool {
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64_0(v *Value) bool {
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8_0(v *Value) bool {
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
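// exampleStoreOp is an illustrative sketch, not generated code: it
// summarizes the dispatch the Store rules above perform. The opcode is
// chosen from the stored type's size, with floating-point values taking
// the SSE store opcodes. Returns the opcode name for illustration only.
func exampleStoreOp(size int64, isFloat bool) string {
	switch {
	case isFloat && size == 8:
		return "MOVSDstore"
	case isFloat && size == 4:
		return "MOVSSstore"
	case size == 8:
		return "MOVQstore"
	case size == 4:
		return "MOVLstore"
	case size == 2:
		return "MOVWstore"
	default:
		return "MOVBstore"
	}
}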
func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpTrunc_0(v *Value) bool {
	// match: (Trunc x)
	// cond:
	// result: (ROUNDSD [3] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 3
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
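// exampleTrunc64to32 is an illustrative sketch, not generated code: the
// integer Trunc rules above rewrite to a plain Copy because on amd64 the
// low bits of a wider register already hold the narrow value, so no
// instruction is needed; only the SSA type changes.
func exampleTrunc64to32(x int64) int32 {
	return int32(x) // free on amd64: reuse the low 32 bits of the register
}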
func rewriteValueAMD64_OpWB_0(v *Value) bool {
	// match: (WB {fn} destptr srcptr mem)
	// cond:
	// result: (LoweredWB {fn} destptr srcptr mem)
	for {
		fn := v.Aux
		_ = v.Args[2]
		destptr := v.Args[0]
		srcptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64LoweredWB)
		v.Aux = fn
		v.AddArg(destptr)
		v.AddArg(srcptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpXor16_0(v *Value) bool {
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32_0(v *Value) bool {
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64_0(v *Value) bool {
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8_0(v *Value) bool {
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpZero_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [0] _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[1]
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// cond:
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// cond:
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// cond:
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// cond:
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// cond:
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// cond:
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
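// exampleZero7 is an illustrative sketch, not generated code: sizes that
// are not a single store width are zeroed with two adjacent or
// overlapping constant stores, as in the Zero [3], [5], [6], and [7]
// rules above. makeValAndOff packs the stored value (0) and the byte
// offset into one AuxInt. For Zero [7], a 4-byte store at offset 3
// overlaps a 4-byte store at offset 0:
func exampleZero7(dst []byte) {
	for i := 0; i < 4; i++ {
		dst[i] = 0 // MOVLstoreconst [0] destptr
	}
	for i := 3; i < 7; i++ {
		dst[i] = 0 // MOVLstoreconst [makeValAndOff(0,3)] destptr
	}
}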
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, s-8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore destptr (MOVOconst [0]) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(destptr)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v3.AuxInt = 0
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
	for {
		if v.AuxInt != 48 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 32
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 16
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v5.AddArg(destptr)
		v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v6.AuxInt = 0
		v5.AddArg(v6)
		v5.AddArg(mem)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
	for {
		if v.AuxInt != 64 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 48
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 32
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v6.AuxInt = 16
		v6.AddArg(destptr)
		v5.AddArg(v6)
		v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v7.AuxInt = 0
		v5.AddArg(v7)
		v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v8.AddArg(destptr)
		v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v9.AuxInt = 0
		v8.AddArg(v9)
		v8.AddArg(mem)
		v5.AddArg(v8)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	return false
}
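// exampleZeroOffsets is an illustrative sketch, not generated code: the
// fixed-size SSE rules above (Zero [16]/[32]/[48]/[64]) unroll into one
// 16-byte MOVO store per chunk, built from the highest offset down and
// chained through the memory argument.
func exampleZeroOffsets(n int64) []int64 {
	var offs []int64
	for off := n - 16; off >= 0; off -= 16 {
		offs = append(offs, off)
	}
	return offs
}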
func rewriteValueAMD64_OpZero_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
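// exampleZeroStrategy is an illustrative sketch, not generated code: it
// summarizes the size thresholds the Zero rules above encode. Small sizes
// unroll into constant stores, mid sizes use Duff's device (DUFFZERO),
// and very large sizes (or targets without Duff's device/SSE) fall back
// to REP STOSQ; misaligned sizes are first split by the peeling rules.
func exampleZeroStrategy(s int64, noDuffDevice bool) string {
	switch {
	case s <= 64:
		return "unrolled constant stores"
	case s <= 1024 && s%16 == 0 && !noDuffDevice:
		return "DUFFZERO"
	case s%8 == 0:
		return "REPSTOSQ"
	default:
		return "peel the misaligned head, then zero the aligned remainder"
	}
}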
func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
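// exampleZeroExt8to64 is an illustrative sketch, not generated code: like
// the SignExt rules earlier, each ZeroExt rule above lowers to a single
// zero-extending move (MOVBQZX, MOVWQZX, MOVLQZX).
func exampleZeroExt8to64(x uint8) uint64 {
	return uint64(x) // MOVBQZX
}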
func rewriteBlockAMD64(b *Block) bool {
	config := b.Func.Config
	_ = config
	fe := b.Func.fe
	_ = fe
	typ := &config.Types
	_ = typ
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
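		// Note (descriptive comment, not generated): this rule and the
		// variants that follow convert the "test the k'th bit" idiom,
		// TEST against a shifted-1 mask, into a direct bit-test
		// instruction. The EQ (zero) outcome of TEST corresponds to
		// carry-clear after BT, hence the UGE successor block.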
		// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(c) && !config.nacl
		// result: (UGE (BTLconst [log2uint32(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isUint32PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2uint32(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
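		// Note (descriptive comment, not generated): when the TEST mask
		// is a power-of-two constant c, the bit index is recovered with
		// log2/log2uint32(c) and the test becomes a single bit-test with
		// immediate (BTLconst/BTQconst), as in the rules that follow.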
		// match: (EQ (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ x (MOVQconst [c])))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHLQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHLQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTQconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHLLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTQconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHLLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTQconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTQconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTLconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTLconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ z2 z1:(SHRQconst [63] x)))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTLconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL z2 z1:(SHRLconst [31] x)))
		// cond: z1==z2 && !config.nacl
		// result: (UGE (BTLconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (InvertFlags cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETL {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETLE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETG cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETG {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQ {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETB cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETB {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETBE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETA cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETA {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETAE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETO cmp) yes no)
		// cond:
		// result: (OS cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETO {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If cond yes no)
		// cond:
		// result: (NE (TESTB cond cond) yes no)
		for {
			v := b.Control
			_ = v
			cond := b.Control
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
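		// Note (descriptive comment, not generated): the fallback rule
		// above handles a boolean control value that is not one of the
		// recognized SET* ops. TESTB cond cond sets ZF from the byte
		// value itself, so the NE block branches when cond is true.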
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// cond:
		// result: (OS cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETO {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// cond:
		// result: (OS cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETO {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
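		// The rules below recognize a TEST against a 1<<x mask and rewrite
		// the branch into a direct bit-test: for example,
		// (NE (TESTL (SHLL (MOVLconst [1]) x) y)) becomes (ULT (BTL x y)).
		// BT copies the selected bit into the carry flag, so the ULT block
		// (carry set) branches exactly when the bit is 1. The rewrite is
		// skipped on NaCl (!config.nacl). Go source along the lines of
		//
		//	if y&(1<<uint(x)) != 0 { ... }
		//
		// would typically reach these patterns as a TEST of a SHL.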
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
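		// The same bit-test rewrite applies when the mask is a known
		// power-of-two constant: the mask is replaced by its bit index,
		// e.g. (NE (TESTQconst [c] x)) with isUint64PowerOfTwo(c) becomes
		// (ULT (BTQconst [log2(c)] x)).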
		// match: (NE (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTLconst [log2uint32(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isUint32PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2uint32(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c])))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
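		// A left-shift/right-shift round trip such as
		// (SHLQconst [63] (SHRQconst [63] x)) isolates a single bit of x,
		// so testing its result against itself is equivalent to testing
		// that bit directly; the rules below rewrite such tests to
		// BTQconst/BTLconst with the corresponding bit index (63 or 31
		// for the high bit, 0 for the low bit).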
		// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHLQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHLQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHLLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHLLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [0] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHRQconst [63] x)))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			z2 := v.Args[1]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHRLconst [31] x)))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [31] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
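		// Floating-point SETcc results are handled like the integer ones
		// above, but map to the unsigned and parity-aware block kinds
		// (UGT, UGE, EQF, NEF), matching how AMD64 floating-point
		// comparisons set the flags.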
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
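		// When the control value's flags are statically known (FlagEQ,
		// FlagLT_ULT, ...), the conditional branch is folded away: the
		// block becomes a plain BlockFirst, and swapSuccessors is used
		// when the known condition sends control to the "no" edge.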
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
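	// The unsigned-comparison blocks below follow the same two patterns:
	// InvertFlags mirrors the comparison (UGE becomes ULE, UGT becomes
	// ULT, and vice versa), and constant flag values fold the branch into
	// a BlockFirst with the successors kept or swapped to match.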
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	}
	return false
}
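// As a rough illustration of the block rewrites above (a sketch, not part
// of the generated rules): a source-level bit test such as
//
//	func hasBit(x uint64, k uint) bool {
//		return x&(1<<k) != 0
//	}
//
// would typically produce a TESTQ of a SHLQ feeding an NE block; the rules
// in rewriteBlockAMD64 then replace that with a BTQ and an unsigned ULT
// branch on the carry flag, so the 1<<k mask is never materialized.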