github.com/hlts2/go@v0.0.0-20170904000733-812b34efaed8/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used
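// Each matcher function below was generated from a rewrite rule in
// gen/AMD64.rules. A rule pairs a match pattern, an optional boolean
// condition, and a result; in the .rules source that reads roughly like
// (syntax of this vintage of the generator):
//
//	(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
//	(ADDL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
//
// The match/cond/result comments on the loops below restate each rule.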
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDLconstmem:
		return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v)
	case OpAMD64ADDLmem:
		return rewriteValueAMD64_OpAMD64ADDLmem_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDQconstmem:
		return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v)
	case OpAMD64ADDQmem:
		return rewriteValueAMD64_OpAMD64ADDQmem_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDmem:
		return rewriteValueAMD64_OpAMD64ADDSDmem_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSmem:
		return rewriteValueAMD64_OpAMD64ADDSSmem_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLmem:
		return rewriteValueAMD64_OpAMD64ANDLmem_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQmem:
		return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDmem:
		return rewriteValueAMD64_OpAMD64MULSDmem_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSmem:
		return rewriteValueAMD64_OpAMD64MULSSmem_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLmem:
		return rewriteValueAMD64_OpAMD64ORLmem_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQmem:
		return rewriteValueAMD64_OpAMD64ORQmem_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLmem:
		return rewriteValueAMD64_OpAMD64SUBLmem_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQmem:
		return rewriteValueAMD64_OpAMD64SUBQmem_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDmem:
		return rewriteValueAMD64_OpAMD64SUBSDmem_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSmem:
		return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLmem:
		return rewriteValueAMD64_OpAMD64XORLmem_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQmem:
		return rewriteValueAMD64_OpAMD64XORQmem_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
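// The generator splits an op's rules into chunks of ten (_0, _10, _20, ...)
// to keep individual matcher functions a manageable size; the || chains above
// short-circuit at the first chunk whose rule fires. A driver elsewhere in
// the package (rewrite.go, not shown here) applies rewriteValueAMD64 to every
// value repeatedly until a pass makes no more changes; roughly:
//
//	for changed := true; changed; {
//		changed = false
//		for _, b := range f.Blocks {
//			for _, v := range b.Values {
//				changed = rewriteValueAMD64(v) || changed
//			}
//		}
//	}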
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
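// Note on the ADDLconst folds above: AuxInt is always an int64, so 32-bit
// constant folding wraps explicitly via int64(int32(c+d)). For example,
// c = 1 and d = math.MaxInt32 fold to the int32 value math.MinInt32, stored
// sign-extended as AuxInt -2147483648 rather than the unwrapped 2147483648.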
func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
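// The SHLQconst rules above exploit x86 scaled addressing: for shift counts
// 1, 2, and 3, the sum x + y<<k fits a single LEA with scale 2, 4, or 8.
// For instance (ADDQ x (SHLQconst [3] y)) becomes (LEAQ8 x y), computing
// x + 8*y in one instruction without touching the flags.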
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
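// The *constmem/*mem rules below are store-to-load forwarding across
// register files: when the memory operand of an integer add was just
// written by a floating-point store to the same address, the rules reuse
// the stored value via MOVQf2i/MOVLf2i instead of reloading it from memory.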
func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
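// The ADDSS rules below mirror the ADDSD rules above at 32 bits:
// MOVSSload in place of MOVSDload, and MOVLstore/MOVLi2f in place of
// MOVQstore/MOVQi2f in the forwarding case.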
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
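// ADDSSmem/ADDSDmem above cancel a round trip through memory: when the
// addressed slot was just written from an integer register, MOVLi2f/MOVQi2f
// reinterpret those bits as a float in place of the reload.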
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
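// Masking with 0xFF or 0xFFFF equals zero-extending the low byte or word, so
// the ANDLconst rules above emit MOVBQZX/MOVWQZX instead; an all-zeros mask
// collapses to the constant 0 and an all-ones mask leaves x unchanged.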
func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
	// match: (BTQconst [c] x)
	// cond: c < 32
	// result: (BTLconst [c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
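// CMOVQEQ above tests the flags produced by BSFQ, which sets ZF only when its
// source is zero; since ORQconst [c] with c != 0 guarantees a set bit, the
// equals-zero condition can never hold and the conditional move reduces to x.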
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
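// When both comparison operands are constants, the CMPBconst rules above
// decide the outcome at compile time and materialize it as a Flag* value
// recording both the signed and the unsigned ordering; a compare against 0
// becomes TESTB x x, which sets the same flags with a shorter encoding.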
func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
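// The SHRLconst rule above relies on a range bound: a 32-bit value shifted
// right by c is at most 1<<(32-c) - 1, so whenever n is at least 1<<(32-c)
// the unsigned comparison is known to be below (FlagLT_ULT).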
func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	return false
}
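// The NEGQ patterns above encode alignment remainders: (x&15) lies in [0,15],
// so -((x&15)-16) lies in [1,16] and is always unsigned-below 32; the &7/-8
// form is the analogous bound for 8-byte alignment.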
func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPW x (MOVLconst [c]))
	// cond:
	// result: (CMPWconst x [int64(int16(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int64(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
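// LEAQ1 [c] {s} x y computes x + y + c. The rules below recognize a shifted
// index: (SHLQconst [1] y) is 2*y, so the pair upgrades to LEAQ2, and shifts
// by 2 or 3 upgrade to LEAQ4 or LEAQ8.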
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
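// In the scaled forms below the displacement scales with the index: folding
// (ADDQconst [d] y) into the index of LEAQ2 adds 2*d to the offset, because
// LEAQ2 addresses x + 2*y + c.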
func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
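// MOVBQSX of a load is rewritten below into a sign-extending load built in
// the load's own block (@x.Block); this is valid only while the load has a
// single use (x.Uses == 1), so no other user still needs the unextended value.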
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
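// (MOVBQSX (ANDLconst [c] x)) with c&0x80 == 0 drops the extension entirely:
// the mask clears bit 7, so the byte's sign bit is known zero and only the
// (equivalent) mask c&0x7f remains.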
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
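// The first MOVBQSXload rule above is store-to-load forwarding: a byte just
// stored to the same address is still in a register, so it is sign-extended
// directly instead of being reloaded from memory.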
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
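// MOVBQZX parallels MOVBQSX above, zero- rather than sign-extending; the
// extra MOVBloadidx1 case covers the indexed addressing form, and
// (MOVBQZX (ANDLconst [c] x)) simply tightens the mask to c&0xff.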
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 4906 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4907 // result: (MOVBQZX x) 4908 for { 4909 off := v.AuxInt 4910 sym := v.Aux 4911 _ = v.Args[1] 4912 ptr := v.Args[0] 4913 v_1 := v.Args[1] 4914 if v_1.Op != OpAMD64MOVBstore { 4915 break 4916 } 4917 off2 := v_1.AuxInt 4918 sym2 := v_1.Aux 4919 _ = v_1.Args[2] 4920 ptr2 := v_1.Args[0] 4921 x := v_1.Args[1] 4922 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4923 break 4924 } 4925 v.reset(OpAMD64MOVBQZX) 4926 v.AddArg(x) 4927 return true 4928 } 4929 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 4930 // cond: is32Bit(off1+off2) 4931 // result: (MOVBload [off1+off2] {sym} ptr mem) 4932 for { 4933 off1 := v.AuxInt 4934 sym := v.Aux 4935 _ = v.Args[1] 4936 v_0 := v.Args[0] 4937 if v_0.Op != OpAMD64ADDQconst { 4938 break 4939 } 4940 off2 := v_0.AuxInt 4941 ptr := v_0.Args[0] 4942 mem := v.Args[1] 4943 if !(is32Bit(off1 + off2)) { 4944 break 4945 } 4946 v.reset(OpAMD64MOVBload) 4947 v.AuxInt = off1 + off2 4948 v.Aux = sym 4949 v.AddArg(ptr) 4950 v.AddArg(mem) 4951 return true 4952 } 4953 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4954 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4955 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4956 for { 4957 off1 := v.AuxInt 4958 sym1 := v.Aux 4959 _ = v.Args[1] 4960 v_0 := v.Args[0] 4961 if v_0.Op != OpAMD64LEAQ { 4962 break 4963 } 4964 off2 := v_0.AuxInt 4965 sym2 := v_0.Aux 4966 base := v_0.Args[0] 4967 mem := v.Args[1] 4968 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4969 break 4970 } 4971 v.reset(OpAMD64MOVBload) 4972 v.AuxInt = off1 + off2 4973 v.Aux = mergeSym(sym1, sym2) 4974 v.AddArg(base) 4975 v.AddArg(mem) 4976 return true 4977 } 4978 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 4979 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4980 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 4981 for { 4982 off1 := v.AuxInt 4983 sym1 := v.Aux 4984 _ = v.Args[1] 4985 v_0 := v.Args[0] 4986 if v_0.Op != OpAMD64LEAQ1 { 4987 break 4988 } 4989 off2 := v_0.AuxInt 4990 sym2 := v_0.Aux 4991 _ = v_0.Args[1] 4992 ptr := v_0.Args[0] 4993 idx := v_0.Args[1] 4994 mem := v.Args[1] 4995 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4996 break 4997 } 4998 v.reset(OpAMD64MOVBloadidx1) 4999 v.AuxInt = off1 + off2 5000 v.Aux = mergeSym(sym1, sym2) 5001 v.AddArg(ptr) 5002 v.AddArg(idx) 5003 v.AddArg(mem) 5004 return true 5005 } 5006 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 5007 // cond: ptr.Op != OpSB 5008 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 5009 for { 5010 off := v.AuxInt 5011 sym := v.Aux 5012 _ = v.Args[1] 5013 v_0 := v.Args[0] 5014 if v_0.Op != OpAMD64ADDQ { 5015 break 5016 } 5017 _ = v_0.Args[1] 5018 ptr := v_0.Args[0] 5019 idx := v_0.Args[1] 5020 mem := v.Args[1] 5021 if !(ptr.Op != OpSB) { 5022 break 5023 } 5024 v.reset(OpAMD64MOVBloadidx1) 5025 v.AuxInt = off 5026 v.Aux = sym 5027 v.AddArg(ptr) 5028 v.AddArg(idx) 5029 v.AddArg(mem) 5030 return true 5031 } 5032 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 5033 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 5034 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5035 for { 5036 off1 := v.AuxInt 5037 sym1 := v.Aux 5038 _ = v.Args[1] 5039 v_0 := v.Args[0] 5040 if v_0.Op != OpAMD64LEAL { 5041 break 5042 } 5043 off2 := v_0.AuxInt 5044 sym2 := v_0.Aux 5045 base := v_0.Args[0] 5046 mem := 
v.Args[1] 5047 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 5048 break 5049 } 5050 v.reset(OpAMD64MOVBload) 5051 v.AuxInt = off1 + off2 5052 v.Aux = mergeSym(sym1, sym2) 5053 v.AddArg(base) 5054 v.AddArg(mem) 5055 return true 5056 } 5057 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 5058 // cond: is32Bit(off1+off2) 5059 // result: (MOVBload [off1+off2] {sym} ptr mem) 5060 for { 5061 off1 := v.AuxInt 5062 sym := v.Aux 5063 _ = v.Args[1] 5064 v_0 := v.Args[0] 5065 if v_0.Op != OpAMD64ADDLconst { 5066 break 5067 } 5068 off2 := v_0.AuxInt 5069 ptr := v_0.Args[0] 5070 mem := v.Args[1] 5071 if !(is32Bit(off1 + off2)) { 5072 break 5073 } 5074 v.reset(OpAMD64MOVBload) 5075 v.AuxInt = off1 + off2 5076 v.Aux = sym 5077 v.AddArg(ptr) 5078 v.AddArg(mem) 5079 return true 5080 } 5081 return false 5082 } 5083 func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { 5084 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 5085 // cond: is32Bit(c+d) 5086 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 5087 for { 5088 c := v.AuxInt 5089 sym := v.Aux 5090 _ = v.Args[2] 5091 v_0 := v.Args[0] 5092 if v_0.Op != OpAMD64ADDQconst { 5093 break 5094 } 5095 d := v_0.AuxInt 5096 ptr := v_0.Args[0] 5097 idx := v.Args[1] 5098 mem := v.Args[2] 5099 if !(is32Bit(c + d)) { 5100 break 5101 } 5102 v.reset(OpAMD64MOVBloadidx1) 5103 v.AuxInt = c + d 5104 v.Aux = sym 5105 v.AddArg(ptr) 5106 v.AddArg(idx) 5107 v.AddArg(mem) 5108 return true 5109 } 5110 // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 5111 // cond: is32Bit(c+d) 5112 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 5113 for { 5114 c := v.AuxInt 5115 sym := v.Aux 5116 _ = v.Args[2] 5117 idx := v.Args[0] 5118 v_1 := v.Args[1] 5119 if v_1.Op != OpAMD64ADDQconst { 5120 break 5121 } 5122 d := v_1.AuxInt 5123 ptr := v_1.Args[0] 5124 mem := v.Args[2] 5125 if !(is32Bit(c + d)) { 5126 break 5127 } 5128 v.reset(OpAMD64MOVBloadidx1) 5129 v.AuxInt = c + d 5130 v.Aux = sym 5131 v.AddArg(ptr) 5132 v.AddArg(idx) 5133 v.AddArg(mem) 5134 return true 5135 } 5136 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 5137 // cond: is32Bit(c+d) 5138 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 5139 for { 5140 c := v.AuxInt 5141 sym := v.Aux 5142 _ = v.Args[2] 5143 ptr := v.Args[0] 5144 v_1 := v.Args[1] 5145 if v_1.Op != OpAMD64ADDQconst { 5146 break 5147 } 5148 d := v_1.AuxInt 5149 idx := v_1.Args[0] 5150 mem := v.Args[2] 5151 if !(is32Bit(c + d)) { 5152 break 5153 } 5154 v.reset(OpAMD64MOVBloadidx1) 5155 v.AuxInt = c + d 5156 v.Aux = sym 5157 v.AddArg(ptr) 5158 v.AddArg(idx) 5159 v.AddArg(mem) 5160 return true 5161 } 5162 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 5163 // cond: is32Bit(c+d) 5164 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 5165 for { 5166 c := v.AuxInt 5167 sym := v.Aux 5168 _ = v.Args[2] 5169 v_0 := v.Args[0] 5170 if v_0.Op != OpAMD64ADDQconst { 5171 break 5172 } 5173 d := v_0.AuxInt 5174 idx := v_0.Args[0] 5175 ptr := v.Args[1] 5176 mem := v.Args[2] 5177 if !(is32Bit(c + d)) { 5178 break 5179 } 5180 v.reset(OpAMD64MOVBloadidx1) 5181 v.AuxInt = c + d 5182 v.Aux = sym 5183 v.AddArg(ptr) 5184 v.AddArg(idx) 5185 v.AddArg(mem) 5186 return true 5187 } 5188 return false 5189 } 5190 func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { 5191 b := v.Block 5192 _ = b 5193 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 5194 // cond: 5195 // result: (MOVBstore [off] {sym} ptr x mem) 5196 for { 5197 off := v.AuxInt 5198 sym := v.Aux 5199 _ = v.Args[2] 5200 ptr := 
v.Args[0] 5201 v_1 := v.Args[1] 5202 if v_1.Op != OpAMD64MOVBQSX { 5203 break 5204 } 5205 x := v_1.Args[0] 5206 mem := v.Args[2] 5207 v.reset(OpAMD64MOVBstore) 5208 v.AuxInt = off 5209 v.Aux = sym 5210 v.AddArg(ptr) 5211 v.AddArg(x) 5212 v.AddArg(mem) 5213 return true 5214 } 5215 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 5216 // cond: 5217 // result: (MOVBstore [off] {sym} ptr x mem) 5218 for { 5219 off := v.AuxInt 5220 sym := v.Aux 5221 _ = v.Args[2] 5222 ptr := v.Args[0] 5223 v_1 := v.Args[1] 5224 if v_1.Op != OpAMD64MOVBQZX { 5225 break 5226 } 5227 x := v_1.Args[0] 5228 mem := v.Args[2] 5229 v.reset(OpAMD64MOVBstore) 5230 v.AuxInt = off 5231 v.Aux = sym 5232 v.AddArg(ptr) 5233 v.AddArg(x) 5234 v.AddArg(mem) 5235 return true 5236 } 5237 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 5238 // cond: is32Bit(off1+off2) 5239 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 5240 for { 5241 off1 := v.AuxInt 5242 sym := v.Aux 5243 _ = v.Args[2] 5244 v_0 := v.Args[0] 5245 if v_0.Op != OpAMD64ADDQconst { 5246 break 5247 } 5248 off2 := v_0.AuxInt 5249 ptr := v_0.Args[0] 5250 val := v.Args[1] 5251 mem := v.Args[2] 5252 if !(is32Bit(off1 + off2)) { 5253 break 5254 } 5255 v.reset(OpAMD64MOVBstore) 5256 v.AuxInt = off1 + off2 5257 v.Aux = sym 5258 v.AddArg(ptr) 5259 v.AddArg(val) 5260 v.AddArg(mem) 5261 return true 5262 } 5263 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 5264 // cond: validOff(off) 5265 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 5266 for { 5267 off := v.AuxInt 5268 sym := v.Aux 5269 _ = v.Args[2] 5270 ptr := v.Args[0] 5271 v_1 := v.Args[1] 5272 if v_1.Op != OpAMD64MOVLconst { 5273 break 5274 } 5275 c := v_1.AuxInt 5276 mem := v.Args[2] 5277 if !(validOff(off)) { 5278 break 5279 } 5280 v.reset(OpAMD64MOVBstoreconst) 5281 v.AuxInt = makeValAndOff(int64(int8(c)), off) 5282 v.Aux = sym 5283 v.AddArg(ptr) 5284 v.AddArg(mem) 5285 return true 5286 } 5287 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5288 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5289 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5290 for { 5291 off1 := v.AuxInt 5292 sym1 := v.Aux 5293 _ = v.Args[2] 5294 v_0 := v.Args[0] 5295 if v_0.Op != OpAMD64LEAQ { 5296 break 5297 } 5298 off2 := v_0.AuxInt 5299 sym2 := v_0.Aux 5300 base := v_0.Args[0] 5301 val := v.Args[1] 5302 mem := v.Args[2] 5303 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5304 break 5305 } 5306 v.reset(OpAMD64MOVBstore) 5307 v.AuxInt = off1 + off2 5308 v.Aux = mergeSym(sym1, sym2) 5309 v.AddArg(base) 5310 v.AddArg(val) 5311 v.AddArg(mem) 5312 return true 5313 } 5314 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 5315 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5316 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5317 for { 5318 off1 := v.AuxInt 5319 sym1 := v.Aux 5320 _ = v.Args[2] 5321 v_0 := v.Args[0] 5322 if v_0.Op != OpAMD64LEAQ1 { 5323 break 5324 } 5325 off2 := v_0.AuxInt 5326 sym2 := v_0.Aux 5327 _ = v_0.Args[1] 5328 ptr := v_0.Args[0] 5329 idx := v_0.Args[1] 5330 val := v.Args[1] 5331 mem := v.Args[2] 5332 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5333 break 5334 } 5335 v.reset(OpAMD64MOVBstoreidx1) 5336 v.AuxInt = off1 + off2 5337 v.Aux = mergeSym(sym1, sym2) 5338 v.AddArg(ptr) 5339 v.AddArg(idx) 5340 v.AddArg(val) 5341 v.AddArg(mem) 5342 return true 5343 } 5344 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 5345 // cond: 
ptr.Op != OpSB 5346 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 5347 for { 5348 off := v.AuxInt 5349 sym := v.Aux 5350 _ = v.Args[2] 5351 v_0 := v.Args[0] 5352 if v_0.Op != OpAMD64ADDQ { 5353 break 5354 } 5355 _ = v_0.Args[1] 5356 ptr := v_0.Args[0] 5357 idx := v_0.Args[1] 5358 val := v.Args[1] 5359 mem := v.Args[2] 5360 if !(ptr.Op != OpSB) { 5361 break 5362 } 5363 v.reset(OpAMD64MOVBstoreidx1) 5364 v.AuxInt = off 5365 v.Aux = sym 5366 v.AddArg(ptr) 5367 v.AddArg(idx) 5368 v.AddArg(val) 5369 v.AddArg(mem) 5370 return true 5371 } 5372 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 5373 // cond: x0.Uses == 1 && clobber(x0) 5374 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 5375 for { 5376 i := v.AuxInt 5377 s := v.Aux 5378 _ = v.Args[2] 5379 p := v.Args[0] 5380 w := v.Args[1] 5381 x0 := v.Args[2] 5382 if x0.Op != OpAMD64MOVBstore { 5383 break 5384 } 5385 if x0.AuxInt != i-1 { 5386 break 5387 } 5388 if x0.Aux != s { 5389 break 5390 } 5391 _ = x0.Args[2] 5392 if p != x0.Args[0] { 5393 break 5394 } 5395 x0_1 := x0.Args[1] 5396 if x0_1.Op != OpAMD64SHRWconst { 5397 break 5398 } 5399 if x0_1.AuxInt != 8 { 5400 break 5401 } 5402 if w != x0_1.Args[0] { 5403 break 5404 } 5405 mem := x0.Args[2] 5406 if !(x0.Uses == 1 && clobber(x0)) { 5407 break 5408 } 5409 v.reset(OpAMD64MOVWstore) 5410 v.AuxInt = i - 1 5411 v.Aux = s 5412 v.AddArg(p) 5413 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 5414 v0.AuxInt = 8 5415 v0.AddArg(w) 5416 v.AddArg(v0) 5417 v.AddArg(mem) 5418 return true 5419 } 5420 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 5421 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 5422 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 5423 for { 5424 i := v.AuxInt 5425 s := v.Aux 5426 _ = v.Args[2] 5427 p := v.Args[0] 5428 w := v.Args[1] 5429 x2 := v.Args[2] 5430 if x2.Op != OpAMD64MOVBstore { 5431 break 5432 } 5433 if x2.AuxInt != i-1 { 5434 break 5435 } 5436 if x2.Aux != s { 5437 break 5438 } 5439 _ = x2.Args[2] 5440 if p != x2.Args[0] { 5441 break 5442 } 5443 x2_1 := x2.Args[1] 5444 if x2_1.Op != OpAMD64SHRLconst { 5445 break 5446 } 5447 if x2_1.AuxInt != 8 { 5448 break 5449 } 5450 if w != x2_1.Args[0] { 5451 break 5452 } 5453 x1 := x2.Args[2] 5454 if x1.Op != OpAMD64MOVBstore { 5455 break 5456 } 5457 if x1.AuxInt != i-2 { 5458 break 5459 } 5460 if x1.Aux != s { 5461 break 5462 } 5463 _ = x1.Args[2] 5464 if p != x1.Args[0] { 5465 break 5466 } 5467 x1_1 := x1.Args[1] 5468 if x1_1.Op != OpAMD64SHRLconst { 5469 break 5470 } 5471 if x1_1.AuxInt != 16 { 5472 break 5473 } 5474 if w != x1_1.Args[0] { 5475 break 5476 } 5477 x0 := x1.Args[2] 5478 if x0.Op != OpAMD64MOVBstore { 5479 break 5480 } 5481 if x0.AuxInt != i-3 { 5482 break 5483 } 5484 if x0.Aux != s { 5485 break 5486 } 5487 _ = x0.Args[2] 5488 if p != x0.Args[0] { 5489 break 5490 } 5491 x0_1 := x0.Args[1] 5492 if x0_1.Op != OpAMD64SHRLconst { 5493 break 5494 } 5495 if x0_1.AuxInt != 24 { 5496 break 5497 } 5498 if w != x0_1.Args[0] { 5499 break 5500 } 5501 mem := x0.Args[2] 5502 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 5503 break 5504 } 5505 v.reset(OpAMD64MOVLstore) 5506 v.AuxInt = i - 3 5507 v.Aux = s 5508 v.AddArg(p) 5509 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 5510 v0.AddArg(w) 5511 v.AddArg(v0) 5512 v.AddArg(mem) 5513 
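// The rule just completed collapses four single-byte stores of w, w>>8,
// w>>16, and w>>24 at descending offsets into one 32-bit store of
// BSWAPL(w): the lowest address holds the most significant byte, i.e. a
// big-endian layout, which a byte-swapped little-endian MOVLstore
// reproduces exactly. A sketch of Go source that lowers to this shape
// (illustrative only; the function and variable names are hypothetical,
// not taken from this file):
//
//	func putUint32BE(b []byte, w uint32) {
//		b[0] = byte(w >> 24) // MOVBstore [i-3] {s} p (SHRLconst [24] w)
//		b[1] = byte(w >> 16) // MOVBstore [i-2] {s} p (SHRLconst [16] w)
//		b[2] = byte(w >> 8)  // MOVBstore [i-1] {s} p (SHRLconst [8] w)
//		b[3] = byte(w)       // MOVBstore [i]   {s} p w
//	}
//
// After the rewrite, the four stores become a single
// (MOVLstore [i-3] {s} p (BSWAPL w) mem).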
return true 5514 } 5515 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 5516 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 5517 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 5518 for { 5519 i := v.AuxInt 5520 s := v.Aux 5521 _ = v.Args[2] 5522 p := v.Args[0] 5523 w := v.Args[1] 5524 x6 := v.Args[2] 5525 if x6.Op != OpAMD64MOVBstore { 5526 break 5527 } 5528 if x6.AuxInt != i-1 { 5529 break 5530 } 5531 if x6.Aux != s { 5532 break 5533 } 5534 _ = x6.Args[2] 5535 if p != x6.Args[0] { 5536 break 5537 } 5538 x6_1 := x6.Args[1] 5539 if x6_1.Op != OpAMD64SHRQconst { 5540 break 5541 } 5542 if x6_1.AuxInt != 8 { 5543 break 5544 } 5545 if w != x6_1.Args[0] { 5546 break 5547 } 5548 x5 := x6.Args[2] 5549 if x5.Op != OpAMD64MOVBstore { 5550 break 5551 } 5552 if x5.AuxInt != i-2 { 5553 break 5554 } 5555 if x5.Aux != s { 5556 break 5557 } 5558 _ = x5.Args[2] 5559 if p != x5.Args[0] { 5560 break 5561 } 5562 x5_1 := x5.Args[1] 5563 if x5_1.Op != OpAMD64SHRQconst { 5564 break 5565 } 5566 if x5_1.AuxInt != 16 { 5567 break 5568 } 5569 if w != x5_1.Args[0] { 5570 break 5571 } 5572 x4 := x5.Args[2] 5573 if x4.Op != OpAMD64MOVBstore { 5574 break 5575 } 5576 if x4.AuxInt != i-3 { 5577 break 5578 } 5579 if x4.Aux != s { 5580 break 5581 } 5582 _ = x4.Args[2] 5583 if p != x4.Args[0] { 5584 break 5585 } 5586 x4_1 := x4.Args[1] 5587 if x4_1.Op != OpAMD64SHRQconst { 5588 break 5589 } 5590 if x4_1.AuxInt != 24 { 5591 break 5592 } 5593 if w != x4_1.Args[0] { 5594 break 5595 } 5596 x3 := x4.Args[2] 5597 if x3.Op != OpAMD64MOVBstore { 5598 break 5599 } 5600 if x3.AuxInt != i-4 { 5601 break 5602 } 5603 if x3.Aux != s { 5604 break 5605 } 5606 _ = x3.Args[2] 5607 if p != x3.Args[0] { 5608 break 5609 } 5610 x3_1 := x3.Args[1] 5611 if x3_1.Op != OpAMD64SHRQconst { 5612 break 5613 } 5614 if x3_1.AuxInt != 32 { 5615 break 5616 } 5617 if w != x3_1.Args[0] { 5618 break 5619 } 5620 x2 := x3.Args[2] 5621 if x2.Op != OpAMD64MOVBstore { 5622 break 5623 } 5624 if x2.AuxInt != i-5 { 5625 break 5626 } 5627 if x2.Aux != s { 5628 break 5629 } 5630 _ = x2.Args[2] 5631 if p != x2.Args[0] { 5632 break 5633 } 5634 x2_1 := x2.Args[1] 5635 if x2_1.Op != OpAMD64SHRQconst { 5636 break 5637 } 5638 if x2_1.AuxInt != 40 { 5639 break 5640 } 5641 if w != x2_1.Args[0] { 5642 break 5643 } 5644 x1 := x2.Args[2] 5645 if x1.Op != OpAMD64MOVBstore { 5646 break 5647 } 5648 if x1.AuxInt != i-6 { 5649 break 5650 } 5651 if x1.Aux != s { 5652 break 5653 } 5654 _ = x1.Args[2] 5655 if p != x1.Args[0] { 5656 break 5657 } 5658 x1_1 := x1.Args[1] 5659 if x1_1.Op != OpAMD64SHRQconst { 5660 break 5661 } 5662 if x1_1.AuxInt != 48 { 5663 break 5664 } 5665 if w != x1_1.Args[0] { 5666 break 5667 } 5668 x0 := x1.Args[2] 5669 if x0.Op != OpAMD64MOVBstore { 5670 break 5671 } 5672 if x0.AuxInt != i-7 { 5673 break 5674 } 5675 if x0.Aux != s { 5676 break 5677 } 5678 _ = x0.Args[2] 5679 if p != x0.Args[0] { 5680 break 5681 } 5682 x0_1 := x0.Args[1] 5683 if x0_1.Op != OpAMD64SHRQconst { 5684 break 5685 } 5686 if x0_1.AuxInt != 56 { 5687 break 5688 } 5689 if w != x0_1.Args[0] { 5690 break 5691 
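// Note the shape of the generated matcher: each rule compiles to a
// straight-line chain of structural checks like the ones above, and any
// mismatch hits a `break` that abandons this rule and falls through to
// the next candidate. Only after the whole tree of eight byte stores has
// been verified does the cond from the rule file run (the
// x0.Uses == 1 && ... && clobber(x0) test below): the Uses checks ensure
// the intermediate stores have no other consumers, and clobber (a helper
// in rewrite.go) invalidates the matched values so dead-code elimination
// can drop them once the single (MOVQstore [i-7] {s} p (BSWAPQ w) mem)
// replacement is built below. This rule is the 64-bit analogue of the
// four-store BSWAPL rule earlier in this function.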
} 5692 mem := x0.Args[2] 5693 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 5694 break 5695 } 5696 v.reset(OpAMD64MOVQstore) 5697 v.AuxInt = i - 7 5698 v.Aux = s 5699 v.AddArg(p) 5700 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 5701 v0.AddArg(w) 5702 v.AddArg(v0) 5703 v.AddArg(mem) 5704 return true 5705 } 5706 return false 5707 } 5708 func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { 5709 b := v.Block 5710 _ = b 5711 typ := &b.Func.Config.Types 5712 _ = typ 5713 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 5714 // cond: x.Uses == 1 && clobber(x) 5715 // result: (MOVWstore [i-1] {s} p w mem) 5716 for { 5717 i := v.AuxInt 5718 s := v.Aux 5719 _ = v.Args[2] 5720 p := v.Args[0] 5721 v_1 := v.Args[1] 5722 if v_1.Op != OpAMD64SHRQconst { 5723 break 5724 } 5725 if v_1.AuxInt != 8 { 5726 break 5727 } 5728 w := v_1.Args[0] 5729 x := v.Args[2] 5730 if x.Op != OpAMD64MOVBstore { 5731 break 5732 } 5733 if x.AuxInt != i-1 { 5734 break 5735 } 5736 if x.Aux != s { 5737 break 5738 } 5739 _ = x.Args[2] 5740 if p != x.Args[0] { 5741 break 5742 } 5743 if w != x.Args[1] { 5744 break 5745 } 5746 mem := x.Args[2] 5747 if !(x.Uses == 1 && clobber(x)) { 5748 break 5749 } 5750 v.reset(OpAMD64MOVWstore) 5751 v.AuxInt = i - 1 5752 v.Aux = s 5753 v.AddArg(p) 5754 v.AddArg(w) 5755 v.AddArg(mem) 5756 return true 5757 } 5758 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 5759 // cond: x.Uses == 1 && clobber(x) 5760 // result: (MOVWstore [i-1] {s} p w0 mem) 5761 for { 5762 i := v.AuxInt 5763 s := v.Aux 5764 _ = v.Args[2] 5765 p := v.Args[0] 5766 v_1 := v.Args[1] 5767 if v_1.Op != OpAMD64SHRQconst { 5768 break 5769 } 5770 j := v_1.AuxInt 5771 w := v_1.Args[0] 5772 x := v.Args[2] 5773 if x.Op != OpAMD64MOVBstore { 5774 break 5775 } 5776 if x.AuxInt != i-1 { 5777 break 5778 } 5779 if x.Aux != s { 5780 break 5781 } 5782 _ = x.Args[2] 5783 if p != x.Args[0] { 5784 break 5785 } 5786 w0 := x.Args[1] 5787 if w0.Op != OpAMD64SHRQconst { 5788 break 5789 } 5790 if w0.AuxInt != j-8 { 5791 break 5792 } 5793 if w != w0.Args[0] { 5794 break 5795 } 5796 mem := x.Args[2] 5797 if !(x.Uses == 1 && clobber(x)) { 5798 break 5799 } 5800 v.reset(OpAMD64MOVWstore) 5801 v.AuxInt = i - 1 5802 v.Aux = s 5803 v.AddArg(p) 5804 v.AddArg(w0) 5805 v.AddArg(mem) 5806 return true 5807 } 5808 // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) 5809 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 5810 // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) 5811 for { 5812 i := v.AuxInt 5813 s := v.Aux 5814 _ = v.Args[2] 5815 p := v.Args[0] 5816 x1 := v.Args[1] 5817 if x1.Op != OpAMD64MOVBload { 5818 break 5819 } 5820 j := x1.AuxInt 5821 s2 := x1.Aux 5822 _ = x1.Args[1] 5823 p2 := x1.Args[0] 5824 mem := x1.Args[1] 5825 mem2 := v.Args[2] 5826 if mem2.Op != OpAMD64MOVBstore { 5827 break 5828 } 5829 if mem2.AuxInt != i-1 { 5830 break 5831 } 5832 if mem2.Aux != s { 5833 break 5834 } 5835 _ = mem2.Args[2] 5836 if p != mem2.Args[0] { 5837 break 5838 } 5839 x2 := mem2.Args[1] 5840 if x2.Op != OpAMD64MOVBload { 5841 break 5842 } 5843 if x2.AuxInt != j-1 { 5844 break 5845 } 5846 if x2.Aux != s2 { 5847 break 5848 } 5849 _ = x2.Args[1] 5850 if p2 != 
x2.Args[0] { 5851 break 5852 } 5853 if mem != x2.Args[1] { 5854 break 5855 } 5856 if mem != mem2.Args[2] { 5857 break 5858 } 5859 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 5860 break 5861 } 5862 v.reset(OpAMD64MOVWstore) 5863 v.AuxInt = i - 1 5864 v.Aux = s 5865 v.AddArg(p) 5866 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 5867 v0.AuxInt = j - 1 5868 v0.Aux = s2 5869 v0.AddArg(p2) 5870 v0.AddArg(mem) 5871 v.AddArg(v0) 5872 v.AddArg(mem) 5873 return true 5874 } 5875 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 5876 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 5877 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5878 for { 5879 off1 := v.AuxInt 5880 sym1 := v.Aux 5881 _ = v.Args[2] 5882 v_0 := v.Args[0] 5883 if v_0.Op != OpAMD64LEAL { 5884 break 5885 } 5886 off2 := v_0.AuxInt 5887 sym2 := v_0.Aux 5888 base := v_0.Args[0] 5889 val := v.Args[1] 5890 mem := v.Args[2] 5891 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 5892 break 5893 } 5894 v.reset(OpAMD64MOVBstore) 5895 v.AuxInt = off1 + off2 5896 v.Aux = mergeSym(sym1, sym2) 5897 v.AddArg(base) 5898 v.AddArg(val) 5899 v.AddArg(mem) 5900 return true 5901 } 5902 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 5903 // cond: is32Bit(off1+off2) 5904 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 5905 for { 5906 off1 := v.AuxInt 5907 sym := v.Aux 5908 _ = v.Args[2] 5909 v_0 := v.Args[0] 5910 if v_0.Op != OpAMD64ADDLconst { 5911 break 5912 } 5913 off2 := v_0.AuxInt 5914 ptr := v_0.Args[0] 5915 val := v.Args[1] 5916 mem := v.Args[2] 5917 if !(is32Bit(off1 + off2)) { 5918 break 5919 } 5920 v.reset(OpAMD64MOVBstore) 5921 v.AuxInt = off1 + off2 5922 v.Aux = sym 5923 v.AddArg(ptr) 5924 v.AddArg(val) 5925 v.AddArg(mem) 5926 return true 5927 } 5928 return false 5929 } 5930 func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { 5931 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 5932 // cond: ValAndOff(sc).canAdd(off) 5933 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5934 for { 5935 sc := v.AuxInt 5936 s := v.Aux 5937 _ = v.Args[1] 5938 v_0 := v.Args[0] 5939 if v_0.Op != OpAMD64ADDQconst { 5940 break 5941 } 5942 off := v_0.AuxInt 5943 ptr := v_0.Args[0] 5944 mem := v.Args[1] 5945 if !(ValAndOff(sc).canAdd(off)) { 5946 break 5947 } 5948 v.reset(OpAMD64MOVBstoreconst) 5949 v.AuxInt = ValAndOff(sc).add(off) 5950 v.Aux = s 5951 v.AddArg(ptr) 5952 v.AddArg(mem) 5953 return true 5954 } 5955 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 5956 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5957 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5958 for { 5959 sc := v.AuxInt 5960 sym1 := v.Aux 5961 _ = v.Args[1] 5962 v_0 := v.Args[0] 5963 if v_0.Op != OpAMD64LEAQ { 5964 break 5965 } 5966 off := v_0.AuxInt 5967 sym2 := v_0.Aux 5968 ptr := v_0.Args[0] 5969 mem := v.Args[1] 5970 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5971 break 5972 } 5973 v.reset(OpAMD64MOVBstoreconst) 5974 v.AuxInt = ValAndOff(sc).add(off) 5975 v.Aux = mergeSym(sym1, sym2) 5976 v.AddArg(ptr) 5977 v.AddArg(mem) 5978 return true 5979 } 5980 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 5981 // cond: canMergeSym(sym1, sym2) 5982 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5983 for { 5984 x := v.AuxInt 5985 sym1 := v.Aux 5986 _ = v.Args[1] 5987 v_0 
:= v.Args[0] 5988 if v_0.Op != OpAMD64LEAQ1 { 5989 break 5990 } 5991 off := v_0.AuxInt 5992 sym2 := v_0.Aux 5993 _ = v_0.Args[1] 5994 ptr := v_0.Args[0] 5995 idx := v_0.Args[1] 5996 mem := v.Args[1] 5997 if !(canMergeSym(sym1, sym2)) { 5998 break 5999 } 6000 v.reset(OpAMD64MOVBstoreconstidx1) 6001 v.AuxInt = ValAndOff(x).add(off) 6002 v.Aux = mergeSym(sym1, sym2) 6003 v.AddArg(ptr) 6004 v.AddArg(idx) 6005 v.AddArg(mem) 6006 return true 6007 } 6008 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 6009 // cond: 6010 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 6011 for { 6012 x := v.AuxInt 6013 sym := v.Aux 6014 _ = v.Args[1] 6015 v_0 := v.Args[0] 6016 if v_0.Op != OpAMD64ADDQ { 6017 break 6018 } 6019 _ = v_0.Args[1] 6020 ptr := v_0.Args[0] 6021 idx := v_0.Args[1] 6022 mem := v.Args[1] 6023 v.reset(OpAMD64MOVBstoreconstidx1) 6024 v.AuxInt = x 6025 v.Aux = sym 6026 v.AddArg(ptr) 6027 v.AddArg(idx) 6028 v.AddArg(mem) 6029 return true 6030 } 6031 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 6032 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 6033 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 6034 for { 6035 c := v.AuxInt 6036 s := v.Aux 6037 _ = v.Args[1] 6038 p := v.Args[0] 6039 x := v.Args[1] 6040 if x.Op != OpAMD64MOVBstoreconst { 6041 break 6042 } 6043 a := x.AuxInt 6044 if x.Aux != s { 6045 break 6046 } 6047 _ = x.Args[1] 6048 if p != x.Args[0] { 6049 break 6050 } 6051 mem := x.Args[1] 6052 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 6053 break 6054 } 6055 v.reset(OpAMD64MOVWstoreconst) 6056 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 6057 v.Aux = s 6058 v.AddArg(p) 6059 v.AddArg(mem) 6060 return true 6061 } 6062 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 6063 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6064 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6065 for { 6066 sc := v.AuxInt 6067 sym1 := v.Aux 6068 _ = v.Args[1] 6069 v_0 := v.Args[0] 6070 if v_0.Op != OpAMD64LEAL { 6071 break 6072 } 6073 off := v_0.AuxInt 6074 sym2 := v_0.Aux 6075 ptr := v_0.Args[0] 6076 mem := v.Args[1] 6077 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6078 break 6079 } 6080 v.reset(OpAMD64MOVBstoreconst) 6081 v.AuxInt = ValAndOff(sc).add(off) 6082 v.Aux = mergeSym(sym1, sym2) 6083 v.AddArg(ptr) 6084 v.AddArg(mem) 6085 return true 6086 } 6087 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 6088 // cond: ValAndOff(sc).canAdd(off) 6089 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6090 for { 6091 sc := v.AuxInt 6092 s := v.Aux 6093 _ = v.Args[1] 6094 v_0 := v.Args[0] 6095 if v_0.Op != OpAMD64ADDLconst { 6096 break 6097 } 6098 off := v_0.AuxInt 6099 ptr := v_0.Args[0] 6100 mem := v.Args[1] 6101 if !(ValAndOff(sc).canAdd(off)) { 6102 break 6103 } 6104 v.reset(OpAMD64MOVBstoreconst) 6105 v.AuxInt = ValAndOff(sc).add(off) 6106 v.Aux = s 6107 v.AddArg(ptr) 6108 v.AddArg(mem) 6109 return true 6110 } 6111 return false 6112 } 6113 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { 6114 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 6115 // cond: ValAndOff(x).canAdd(c) 6116 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6117 for { 6118 x := v.AuxInt 6119 sym := v.Aux 6120 _ = v.Args[2] 6121 v_0 := 
v.Args[0] 6122 if v_0.Op != OpAMD64ADDQconst { 6123 break 6124 } 6125 c := v_0.AuxInt 6126 ptr := v_0.Args[0] 6127 idx := v.Args[1] 6128 mem := v.Args[2] 6129 if !(ValAndOff(x).canAdd(c)) { 6130 break 6131 } 6132 v.reset(OpAMD64MOVBstoreconstidx1) 6133 v.AuxInt = ValAndOff(x).add(c) 6134 v.Aux = sym 6135 v.AddArg(ptr) 6136 v.AddArg(idx) 6137 v.AddArg(mem) 6138 return true 6139 } 6140 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 6141 // cond: ValAndOff(x).canAdd(c) 6142 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6143 for { 6144 x := v.AuxInt 6145 sym := v.Aux 6146 _ = v.Args[2] 6147 ptr := v.Args[0] 6148 v_1 := v.Args[1] 6149 if v_1.Op != OpAMD64ADDQconst { 6150 break 6151 } 6152 c := v_1.AuxInt 6153 idx := v_1.Args[0] 6154 mem := v.Args[2] 6155 if !(ValAndOff(x).canAdd(c)) { 6156 break 6157 } 6158 v.reset(OpAMD64MOVBstoreconstidx1) 6159 v.AuxInt = ValAndOff(x).add(c) 6160 v.Aux = sym 6161 v.AddArg(ptr) 6162 v.AddArg(idx) 6163 v.AddArg(mem) 6164 return true 6165 } 6166 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 6167 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 6168 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 6169 for { 6170 c := v.AuxInt 6171 s := v.Aux 6172 _ = v.Args[2] 6173 p := v.Args[0] 6174 i := v.Args[1] 6175 x := v.Args[2] 6176 if x.Op != OpAMD64MOVBstoreconstidx1 { 6177 break 6178 } 6179 a := x.AuxInt 6180 if x.Aux != s { 6181 break 6182 } 6183 _ = x.Args[2] 6184 if p != x.Args[0] { 6185 break 6186 } 6187 if i != x.Args[1] { 6188 break 6189 } 6190 mem := x.Args[2] 6191 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 6192 break 6193 } 6194 v.reset(OpAMD64MOVWstoreconstidx1) 6195 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 6196 v.Aux = s 6197 v.AddArg(p) 6198 v.AddArg(i) 6199 v.AddArg(mem) 6200 return true 6201 } 6202 return false 6203 } 6204 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { 6205 b := v.Block 6206 _ = b 6207 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6208 // cond: is32Bit(c+d) 6209 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 6210 for { 6211 c := v.AuxInt 6212 sym := v.Aux 6213 _ = v.Args[3] 6214 v_0 := v.Args[0] 6215 if v_0.Op != OpAMD64ADDQconst { 6216 break 6217 } 6218 d := v_0.AuxInt 6219 ptr := v_0.Args[0] 6220 idx := v.Args[1] 6221 val := v.Args[2] 6222 mem := v.Args[3] 6223 if !(is32Bit(c + d)) { 6224 break 6225 } 6226 v.reset(OpAMD64MOVBstoreidx1) 6227 v.AuxInt = c + d 6228 v.Aux = sym 6229 v.AddArg(ptr) 6230 v.AddArg(idx) 6231 v.AddArg(val) 6232 v.AddArg(mem) 6233 return true 6234 } 6235 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6236 // cond: is32Bit(c+d) 6237 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 6238 for { 6239 c := v.AuxInt 6240 sym := v.Aux 6241 _ = v.Args[3] 6242 ptr := v.Args[0] 6243 v_1 := v.Args[1] 6244 if v_1.Op != OpAMD64ADDQconst { 6245 break 6246 } 6247 d := v_1.AuxInt 6248 idx := v_1.Args[0] 6249 val := v.Args[2] 6250 mem := v.Args[3] 6251 if !(is32Bit(c + d)) { 6252 break 6253 } 6254 v.reset(OpAMD64MOVBstoreidx1) 6255 v.AuxInt = c + d 6256 v.Aux = sym 6257 v.AddArg(ptr) 6258 v.AddArg(idx) 6259 v.AddArg(val) 6260 v.AddArg(mem) 6261 return true 6262 } 6263 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) 6264 // cond: 
x0.Uses == 1 && clobber(x0) 6265 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem) 6266 for { 6267 i := v.AuxInt 6268 s := v.Aux 6269 _ = v.Args[3] 6270 p := v.Args[0] 6271 idx := v.Args[1] 6272 w := v.Args[2] 6273 x0 := v.Args[3] 6274 if x0.Op != OpAMD64MOVBstoreidx1 { 6275 break 6276 } 6277 if x0.AuxInt != i-1 { 6278 break 6279 } 6280 if x0.Aux != s { 6281 break 6282 } 6283 _ = x0.Args[3] 6284 if p != x0.Args[0] { 6285 break 6286 } 6287 if idx != x0.Args[1] { 6288 break 6289 } 6290 x0_2 := x0.Args[2] 6291 if x0_2.Op != OpAMD64SHRWconst { 6292 break 6293 } 6294 if x0_2.AuxInt != 8 { 6295 break 6296 } 6297 if w != x0_2.Args[0] { 6298 break 6299 } 6300 mem := x0.Args[3] 6301 if !(x0.Uses == 1 && clobber(x0)) { 6302 break 6303 } 6304 v.reset(OpAMD64MOVWstoreidx1) 6305 v.AuxInt = i - 1 6306 v.Aux = s 6307 v.AddArg(p) 6308 v.AddArg(idx) 6309 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 6310 v0.AuxInt = 8 6311 v0.AddArg(w) 6312 v.AddArg(v0) 6313 v.AddArg(mem) 6314 return true 6315 } 6316 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) 6317 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 6318 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem) 6319 for { 6320 i := v.AuxInt 6321 s := v.Aux 6322 _ = v.Args[3] 6323 p := v.Args[0] 6324 idx := v.Args[1] 6325 w := v.Args[2] 6326 x2 := v.Args[3] 6327 if x2.Op != OpAMD64MOVBstoreidx1 { 6328 break 6329 } 6330 if x2.AuxInt != i-1 { 6331 break 6332 } 6333 if x2.Aux != s { 6334 break 6335 } 6336 _ = x2.Args[3] 6337 if p != x2.Args[0] { 6338 break 6339 } 6340 if idx != x2.Args[1] { 6341 break 6342 } 6343 x2_2 := x2.Args[2] 6344 if x2_2.Op != OpAMD64SHRLconst { 6345 break 6346 } 6347 if x2_2.AuxInt != 8 { 6348 break 6349 } 6350 if w != x2_2.Args[0] { 6351 break 6352 } 6353 x1 := x2.Args[3] 6354 if x1.Op != OpAMD64MOVBstoreidx1 { 6355 break 6356 } 6357 if x1.AuxInt != i-2 { 6358 break 6359 } 6360 if x1.Aux != s { 6361 break 6362 } 6363 _ = x1.Args[3] 6364 if p != x1.Args[0] { 6365 break 6366 } 6367 if idx != x1.Args[1] { 6368 break 6369 } 6370 x1_2 := x1.Args[2] 6371 if x1_2.Op != OpAMD64SHRLconst { 6372 break 6373 } 6374 if x1_2.AuxInt != 16 { 6375 break 6376 } 6377 if w != x1_2.Args[0] { 6378 break 6379 } 6380 x0 := x1.Args[3] 6381 if x0.Op != OpAMD64MOVBstoreidx1 { 6382 break 6383 } 6384 if x0.AuxInt != i-3 { 6385 break 6386 } 6387 if x0.Aux != s { 6388 break 6389 } 6390 _ = x0.Args[3] 6391 if p != x0.Args[0] { 6392 break 6393 } 6394 if idx != x0.Args[1] { 6395 break 6396 } 6397 x0_2 := x0.Args[2] 6398 if x0_2.Op != OpAMD64SHRLconst { 6399 break 6400 } 6401 if x0_2.AuxInt != 24 { 6402 break 6403 } 6404 if w != x0_2.Args[0] { 6405 break 6406 } 6407 mem := x0.Args[3] 6408 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 6409 break 6410 } 6411 v.reset(OpAMD64MOVLstoreidx1) 6412 v.AuxInt = i - 3 6413 v.Aux = s 6414 v.AddArg(p) 6415 v.AddArg(idx) 6416 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 6417 v0.AddArg(w) 6418 v.AddArg(v0) 6419 v.AddArg(mem) 6420 return true 6421 } 6422 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx 
(SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) 6423 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 6424 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem) 6425 for { 6426 i := v.AuxInt 6427 s := v.Aux 6428 _ = v.Args[3] 6429 p := v.Args[0] 6430 idx := v.Args[1] 6431 w := v.Args[2] 6432 x6 := v.Args[3] 6433 if x6.Op != OpAMD64MOVBstoreidx1 { 6434 break 6435 } 6436 if x6.AuxInt != i-1 { 6437 break 6438 } 6439 if x6.Aux != s { 6440 break 6441 } 6442 _ = x6.Args[3] 6443 if p != x6.Args[0] { 6444 break 6445 } 6446 if idx != x6.Args[1] { 6447 break 6448 } 6449 x6_2 := x6.Args[2] 6450 if x6_2.Op != OpAMD64SHRQconst { 6451 break 6452 } 6453 if x6_2.AuxInt != 8 { 6454 break 6455 } 6456 if w != x6_2.Args[0] { 6457 break 6458 } 6459 x5 := x6.Args[3] 6460 if x5.Op != OpAMD64MOVBstoreidx1 { 6461 break 6462 } 6463 if x5.AuxInt != i-2 { 6464 break 6465 } 6466 if x5.Aux != s { 6467 break 6468 } 6469 _ = x5.Args[3] 6470 if p != x5.Args[0] { 6471 break 6472 } 6473 if idx != x5.Args[1] { 6474 break 6475 } 6476 x5_2 := x5.Args[2] 6477 if x5_2.Op != OpAMD64SHRQconst { 6478 break 6479 } 6480 if x5_2.AuxInt != 16 { 6481 break 6482 } 6483 if w != x5_2.Args[0] { 6484 break 6485 } 6486 x4 := x5.Args[3] 6487 if x4.Op != OpAMD64MOVBstoreidx1 { 6488 break 6489 } 6490 if x4.AuxInt != i-3 { 6491 break 6492 } 6493 if x4.Aux != s { 6494 break 6495 } 6496 _ = x4.Args[3] 6497 if p != x4.Args[0] { 6498 break 6499 } 6500 if idx != x4.Args[1] { 6501 break 6502 } 6503 x4_2 := x4.Args[2] 6504 if x4_2.Op != OpAMD64SHRQconst { 6505 break 6506 } 6507 if x4_2.AuxInt != 24 { 6508 break 6509 } 6510 if w != x4_2.Args[0] { 6511 break 6512 } 6513 x3 := x4.Args[3] 6514 if x3.Op != OpAMD64MOVBstoreidx1 { 6515 break 6516 } 6517 if x3.AuxInt != i-4 { 6518 break 6519 } 6520 if x3.Aux != s { 6521 break 6522 } 6523 _ = x3.Args[3] 6524 if p != x3.Args[0] { 6525 break 6526 } 6527 if idx != x3.Args[1] { 6528 break 6529 } 6530 x3_2 := x3.Args[2] 6531 if x3_2.Op != OpAMD64SHRQconst { 6532 break 6533 } 6534 if x3_2.AuxInt != 32 { 6535 break 6536 } 6537 if w != x3_2.Args[0] { 6538 break 6539 } 6540 x2 := x3.Args[3] 6541 if x2.Op != OpAMD64MOVBstoreidx1 { 6542 break 6543 } 6544 if x2.AuxInt != i-5 { 6545 break 6546 } 6547 if x2.Aux != s { 6548 break 6549 } 6550 _ = x2.Args[3] 6551 if p != x2.Args[0] { 6552 break 6553 } 6554 if idx != x2.Args[1] { 6555 break 6556 } 6557 x2_2 := x2.Args[2] 6558 if x2_2.Op != OpAMD64SHRQconst { 6559 break 6560 } 6561 if x2_2.AuxInt != 40 { 6562 break 6563 } 6564 if w != x2_2.Args[0] { 6565 break 6566 } 6567 x1 := x2.Args[3] 6568 if x1.Op != OpAMD64MOVBstoreidx1 { 6569 break 6570 } 6571 if x1.AuxInt != i-6 { 6572 break 6573 } 6574 if x1.Aux != s { 6575 break 6576 } 6577 _ = x1.Args[3] 6578 if p != x1.Args[0] { 6579 break 6580 } 6581 if idx != x1.Args[1] { 6582 break 6583 } 6584 x1_2 := x1.Args[2] 6585 if x1_2.Op != OpAMD64SHRQconst { 6586 break 6587 } 6588 if x1_2.AuxInt != 48 { 6589 break 6590 } 6591 if w != x1_2.Args[0] { 6592 break 6593 } 6594 x0 := x1.Args[3] 6595 if x0.Op != OpAMD64MOVBstoreidx1 { 6596 break 6597 } 6598 if x0.AuxInt != i-7 { 6599 break 6600 } 6601 if x0.Aux != s { 6602 break 6603 } 6604 _ = x0.Args[3] 6605 if p != x0.Args[0] { 6606 break 6607 } 6608 if idx != x0.Args[1] { 6609 break 6610 } 6611 x0_2 := x0.Args[2] 6612 if 
x0_2.Op != OpAMD64SHRQconst { 6613 break 6614 } 6615 if x0_2.AuxInt != 56 { 6616 break 6617 } 6618 if w != x0_2.Args[0] { 6619 break 6620 } 6621 mem := x0.Args[3] 6622 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 6623 break 6624 } 6625 v.reset(OpAMD64MOVQstoreidx1) 6626 v.AuxInt = i - 7 6627 v.Aux = s 6628 v.AddArg(p) 6629 v.AddArg(idx) 6630 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 6631 v0.AddArg(w) 6632 v.AddArg(v0) 6633 v.AddArg(mem) 6634 return true 6635 } 6636 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 6637 // cond: x.Uses == 1 && clobber(x) 6638 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 6639 for { 6640 i := v.AuxInt 6641 s := v.Aux 6642 _ = v.Args[3] 6643 p := v.Args[0] 6644 idx := v.Args[1] 6645 v_2 := v.Args[2] 6646 if v_2.Op != OpAMD64SHRQconst { 6647 break 6648 } 6649 if v_2.AuxInt != 8 { 6650 break 6651 } 6652 w := v_2.Args[0] 6653 x := v.Args[3] 6654 if x.Op != OpAMD64MOVBstoreidx1 { 6655 break 6656 } 6657 if x.AuxInt != i-1 { 6658 break 6659 } 6660 if x.Aux != s { 6661 break 6662 } 6663 _ = x.Args[3] 6664 if p != x.Args[0] { 6665 break 6666 } 6667 if idx != x.Args[1] { 6668 break 6669 } 6670 if w != x.Args[2] { 6671 break 6672 } 6673 mem := x.Args[3] 6674 if !(x.Uses == 1 && clobber(x)) { 6675 break 6676 } 6677 v.reset(OpAMD64MOVWstoreidx1) 6678 v.AuxInt = i - 1 6679 v.Aux = s 6680 v.AddArg(p) 6681 v.AddArg(idx) 6682 v.AddArg(w) 6683 v.AddArg(mem) 6684 return true 6685 } 6686 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 6687 // cond: x.Uses == 1 && clobber(x) 6688 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 6689 for { 6690 i := v.AuxInt 6691 s := v.Aux 6692 _ = v.Args[3] 6693 p := v.Args[0] 6694 idx := v.Args[1] 6695 v_2 := v.Args[2] 6696 if v_2.Op != OpAMD64SHRQconst { 6697 break 6698 } 6699 j := v_2.AuxInt 6700 w := v_2.Args[0] 6701 x := v.Args[3] 6702 if x.Op != OpAMD64MOVBstoreidx1 { 6703 break 6704 } 6705 if x.AuxInt != i-1 { 6706 break 6707 } 6708 if x.Aux != s { 6709 break 6710 } 6711 _ = x.Args[3] 6712 if p != x.Args[0] { 6713 break 6714 } 6715 if idx != x.Args[1] { 6716 break 6717 } 6718 w0 := x.Args[2] 6719 if w0.Op != OpAMD64SHRQconst { 6720 break 6721 } 6722 if w0.AuxInt != j-8 { 6723 break 6724 } 6725 if w != w0.Args[0] { 6726 break 6727 } 6728 mem := x.Args[3] 6729 if !(x.Uses == 1 && clobber(x)) { 6730 break 6731 } 6732 v.reset(OpAMD64MOVWstoreidx1) 6733 v.AuxInt = i - 1 6734 v.Aux = s 6735 v.AddArg(p) 6736 v.AddArg(idx) 6737 v.AddArg(w0) 6738 v.AddArg(mem) 6739 return true 6740 } 6741 return false 6742 } 6743 func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { 6744 b := v.Block 6745 _ = b 6746 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 6747 // cond: x.Uses == 1 && clobber(x) 6748 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 6749 for { 6750 x := v.Args[0] 6751 if x.Op != OpAMD64MOVLload { 6752 break 6753 } 6754 off := x.AuxInt 6755 sym := x.Aux 6756 _ = x.Args[1] 6757 ptr := x.Args[0] 6758 mem := x.Args[1] 6759 if !(x.Uses == 1 && clobber(x)) { 6760 break 6761 } 6762 b = x.Block 6763 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 6764 v.reset(OpCopy) 6765 v.AddArg(v0) 6766 v0.AuxInt = off 6767 v0.Aux = sym 6768 v0.AddArg(ptr) 6769 v0.AddArg(mem) 6770 return true 6771 } 6772 // match: (MOVLQSX x:(MOVQload 
[off] {sym} ptr mem)) 6773 // cond: x.Uses == 1 && clobber(x) 6774 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 6775 for { 6776 x := v.Args[0] 6777 if x.Op != OpAMD64MOVQload { 6778 break 6779 } 6780 off := x.AuxInt 6781 sym := x.Aux 6782 _ = x.Args[1] 6783 ptr := x.Args[0] 6784 mem := x.Args[1] 6785 if !(x.Uses == 1 && clobber(x)) { 6786 break 6787 } 6788 b = x.Block 6789 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 6790 v.reset(OpCopy) 6791 v.AddArg(v0) 6792 v0.AuxInt = off 6793 v0.Aux = sym 6794 v0.AddArg(ptr) 6795 v0.AddArg(mem) 6796 return true 6797 } 6798 // match: (MOVLQSX (ANDLconst [c] x)) 6799 // cond: c & 0x80000000 == 0 6800 // result: (ANDLconst [c & 0x7fffffff] x) 6801 for { 6802 v_0 := v.Args[0] 6803 if v_0.Op != OpAMD64ANDLconst { 6804 break 6805 } 6806 c := v_0.AuxInt 6807 x := v_0.Args[0] 6808 if !(c&0x80000000 == 0) { 6809 break 6810 } 6811 v.reset(OpAMD64ANDLconst) 6812 v.AuxInt = c & 0x7fffffff 6813 v.AddArg(x) 6814 return true 6815 } 6816 // match: (MOVLQSX x:(MOVLQSX _)) 6817 // cond: 6818 // result: x 6819 for { 6820 x := v.Args[0] 6821 if x.Op != OpAMD64MOVLQSX { 6822 break 6823 } 6824 v.reset(OpCopy) 6825 v.Type = x.Type 6826 v.AddArg(x) 6827 return true 6828 } 6829 // match: (MOVLQSX x:(MOVWQSX _)) 6830 // cond: 6831 // result: x 6832 for { 6833 x := v.Args[0] 6834 if x.Op != OpAMD64MOVWQSX { 6835 break 6836 } 6837 v.reset(OpCopy) 6838 v.Type = x.Type 6839 v.AddArg(x) 6840 return true 6841 } 6842 // match: (MOVLQSX x:(MOVBQSX _)) 6843 // cond: 6844 // result: x 6845 for { 6846 x := v.Args[0] 6847 if x.Op != OpAMD64MOVBQSX { 6848 break 6849 } 6850 v.reset(OpCopy) 6851 v.Type = x.Type 6852 v.AddArg(x) 6853 return true 6854 } 6855 return false 6856 } 6857 func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { 6858 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 6859 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6860 // result: (MOVLQSX x) 6861 for { 6862 off := v.AuxInt 6863 sym := v.Aux 6864 _ = v.Args[1] 6865 ptr := v.Args[0] 6866 v_1 := v.Args[1] 6867 if v_1.Op != OpAMD64MOVLstore { 6868 break 6869 } 6870 off2 := v_1.AuxInt 6871 sym2 := v_1.Aux 6872 _ = v_1.Args[2] 6873 ptr2 := v_1.Args[0] 6874 x := v_1.Args[1] 6875 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6876 break 6877 } 6878 v.reset(OpAMD64MOVLQSX) 6879 v.AddArg(x) 6880 return true 6881 } 6882 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6883 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6884 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6885 for { 6886 off1 := v.AuxInt 6887 sym1 := v.Aux 6888 _ = v.Args[1] 6889 v_0 := v.Args[0] 6890 if v_0.Op != OpAMD64LEAQ { 6891 break 6892 } 6893 off2 := v_0.AuxInt 6894 sym2 := v_0.Aux 6895 base := v_0.Args[0] 6896 mem := v.Args[1] 6897 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6898 break 6899 } 6900 v.reset(OpAMD64MOVLQSXload) 6901 v.AuxInt = off1 + off2 6902 v.Aux = mergeSym(sym1, sym2) 6903 v.AddArg(base) 6904 v.AddArg(mem) 6905 return true 6906 } 6907 return false 6908 } 6909 func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { 6910 b := v.Block 6911 _ = b 6912 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 6913 // cond: x.Uses == 1 && clobber(x) 6914 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 6915 for { 6916 x := v.Args[0] 6917 if x.Op != OpAMD64MOVLload { 6918 break 6919 } 6920 off := x.AuxInt 6921 sym := x.Aux 6922 _ = x.Args[1] 6923 ptr := x.Args[0] 6924 mem := x.Args[1] 6925 if 
!(x.Uses == 1 && clobber(x)) { 6926 break 6927 } 6928 b = x.Block 6929 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 6930 v.reset(OpCopy) 6931 v.AddArg(v0) 6932 v0.AuxInt = off 6933 v0.Aux = sym 6934 v0.AddArg(ptr) 6935 v0.AddArg(mem) 6936 return true 6937 } 6938 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 6939 // cond: x.Uses == 1 && clobber(x) 6940 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 6941 for { 6942 x := v.Args[0] 6943 if x.Op != OpAMD64MOVQload { 6944 break 6945 } 6946 off := x.AuxInt 6947 sym := x.Aux 6948 _ = x.Args[1] 6949 ptr := x.Args[0] 6950 mem := x.Args[1] 6951 if !(x.Uses == 1 && clobber(x)) { 6952 break 6953 } 6954 b = x.Block 6955 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 6956 v.reset(OpCopy) 6957 v.AddArg(v0) 6958 v0.AuxInt = off 6959 v0.Aux = sym 6960 v0.AddArg(ptr) 6961 v0.AddArg(mem) 6962 return true 6963 } 6964 // match: (MOVLQZX x) 6965 // cond: zeroUpper32Bits(x,3) 6966 // result: x 6967 for { 6968 x := v.Args[0] 6969 if !(zeroUpper32Bits(x, 3)) { 6970 break 6971 } 6972 v.reset(OpCopy) 6973 v.Type = x.Type 6974 v.AddArg(x) 6975 return true 6976 } 6977 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 6978 // cond: x.Uses == 1 && clobber(x) 6979 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 6980 for { 6981 x := v.Args[0] 6982 if x.Op != OpAMD64MOVLloadidx1 { 6983 break 6984 } 6985 off := x.AuxInt 6986 sym := x.Aux 6987 _ = x.Args[2] 6988 ptr := x.Args[0] 6989 idx := x.Args[1] 6990 mem := x.Args[2] 6991 if !(x.Uses == 1 && clobber(x)) { 6992 break 6993 } 6994 b = x.Block 6995 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 6996 v.reset(OpCopy) 6997 v.AddArg(v0) 6998 v0.AuxInt = off 6999 v0.Aux = sym 7000 v0.AddArg(ptr) 7001 v0.AddArg(idx) 7002 v0.AddArg(mem) 7003 return true 7004 } 7005 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 7006 // cond: x.Uses == 1 && clobber(x) 7007 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 7008 for { 7009 x := v.Args[0] 7010 if x.Op != OpAMD64MOVLloadidx4 { 7011 break 7012 } 7013 off := x.AuxInt 7014 sym := x.Aux 7015 _ = x.Args[2] 7016 ptr := x.Args[0] 7017 idx := x.Args[1] 7018 mem := x.Args[2] 7019 if !(x.Uses == 1 && clobber(x)) { 7020 break 7021 } 7022 b = x.Block 7023 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) 7024 v.reset(OpCopy) 7025 v.AddArg(v0) 7026 v0.AuxInt = off 7027 v0.Aux = sym 7028 v0.AddArg(ptr) 7029 v0.AddArg(idx) 7030 v0.AddArg(mem) 7031 return true 7032 } 7033 // match: (MOVLQZX (ANDLconst [c] x)) 7034 // cond: 7035 // result: (ANDLconst [c] x) 7036 for { 7037 v_0 := v.Args[0] 7038 if v_0.Op != OpAMD64ANDLconst { 7039 break 7040 } 7041 c := v_0.AuxInt 7042 x := v_0.Args[0] 7043 v.reset(OpAMD64ANDLconst) 7044 v.AuxInt = c 7045 v.AddArg(x) 7046 return true 7047 } 7048 // match: (MOVLQZX x:(MOVLQZX _)) 7049 // cond: 7050 // result: x 7051 for { 7052 x := v.Args[0] 7053 if x.Op != OpAMD64MOVLQZX { 7054 break 7055 } 7056 v.reset(OpCopy) 7057 v.Type = x.Type 7058 v.AddArg(x) 7059 return true 7060 } 7061 // match: (MOVLQZX x:(MOVWQZX _)) 7062 // cond: 7063 // result: x 7064 for { 7065 x := v.Args[0] 7066 if x.Op != OpAMD64MOVWQZX { 7067 break 7068 } 7069 v.reset(OpCopy) 7070 v.Type = x.Type 7071 v.AddArg(x) 7072 return true 7073 } 7074 // match: (MOVLQZX x:(MOVBQZX _)) 7075 // cond: 7076 // result: x 7077 for { 7078 x := v.Args[0] 7079 if x.Op != OpAMD64MOVBQZX { 7080 break 7081 } 7082 v.reset(OpCopy) 7083 v.Type = x.Type 7084 v.AddArg(x) 7085 return true 7086 } 7087 return false 7088 } 7089 func 
rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool { 7090 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 7091 // cond: is32Bit(off1+off2) 7092 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 7093 for { 7094 off1 := v.AuxInt 7095 sym := v.Aux 7096 _ = v.Args[1] 7097 v_0 := v.Args[0] 7098 if v_0.Op != OpAMD64ADDQconst { 7099 break 7100 } 7101 off2 := v_0.AuxInt 7102 ptr := v_0.Args[0] 7103 mem := v.Args[1] 7104 if !(is32Bit(off1 + off2)) { 7105 break 7106 } 7107 v.reset(OpAMD64MOVLatomicload) 7108 v.AuxInt = off1 + off2 7109 v.Aux = sym 7110 v.AddArg(ptr) 7111 v.AddArg(mem) 7112 return true 7113 } 7114 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 7115 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7116 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 7117 for { 7118 off1 := v.AuxInt 7119 sym1 := v.Aux 7120 _ = v.Args[1] 7121 v_0 := v.Args[0] 7122 if v_0.Op != OpAMD64LEAQ { 7123 break 7124 } 7125 off2 := v_0.AuxInt 7126 sym2 := v_0.Aux 7127 ptr := v_0.Args[0] 7128 mem := v.Args[1] 7129 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7130 break 7131 } 7132 v.reset(OpAMD64MOVLatomicload) 7133 v.AuxInt = off1 + off2 7134 v.Aux = mergeSym(sym1, sym2) 7135 v.AddArg(ptr) 7136 v.AddArg(mem) 7137 return true 7138 } 7139 return false 7140 } 7141 func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool { 7142 b := v.Block 7143 _ = b 7144 // match: (MOVLf2i <t> (Arg [off] {sym})) 7145 // cond: 7146 // result: @b.Func.Entry (Arg <t> [off] {sym}) 7147 for { 7148 t := v.Type 7149 v_0 := v.Args[0] 7150 if v_0.Op != OpArg { 7151 break 7152 } 7153 off := v_0.AuxInt 7154 sym := v_0.Aux 7155 b = b.Func.Entry 7156 v0 := b.NewValue0(v.Pos, OpArg, t) 7157 v.reset(OpCopy) 7158 v.AddArg(v0) 7159 v0.AuxInt = off 7160 v0.Aux = sym 7161 return true 7162 } 7163 return false 7164 } 7165 func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool { 7166 b := v.Block 7167 _ = b 7168 // match: (MOVLi2f <t> (Arg [off] {sym})) 7169 // cond: 7170 // result: @b.Func.Entry (Arg <t> [off] {sym}) 7171 for { 7172 t := v.Type 7173 v_0 := v.Args[0] 7174 if v_0.Op != OpArg { 7175 break 7176 } 7177 off := v_0.AuxInt 7178 sym := v_0.Aux 7179 b = b.Func.Entry 7180 v0 := b.NewValue0(v.Pos, OpArg, t) 7181 v.reset(OpCopy) 7182 v.AddArg(v0) 7183 v0.AuxInt = off 7184 v0.Aux = sym 7185 return true 7186 } 7187 return false 7188 } 7189 func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { 7190 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 7191 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 7192 // result: (MOVLQZX x) 7193 for { 7194 off := v.AuxInt 7195 sym := v.Aux 7196 _ = v.Args[1] 7197 ptr := v.Args[0] 7198 v_1 := v.Args[1] 7199 if v_1.Op != OpAMD64MOVLstore { 7200 break 7201 } 7202 off2 := v_1.AuxInt 7203 sym2 := v_1.Aux 7204 _ = v_1.Args[2] 7205 ptr2 := v_1.Args[0] 7206 x := v_1.Args[1] 7207 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 7208 break 7209 } 7210 v.reset(OpAMD64MOVLQZX) 7211 v.AddArg(x) 7212 return true 7213 } 7214 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 7215 // cond: is32Bit(off1+off2) 7216 // result: (MOVLload [off1+off2] {sym} ptr mem) 7217 for { 7218 off1 := v.AuxInt 7219 sym := v.Aux 7220 _ = v.Args[1] 7221 v_0 := v.Args[0] 7222 if v_0.Op != OpAMD64ADDQconst { 7223 break 7224 } 7225 off2 := v_0.AuxInt 7226 ptr := v_0.Args[0] 7227 mem := v.Args[1] 7228 if !(is32Bit(off1 + off2)) { 7229 break 7230 } 7231 v.reset(OpAMD64MOVLload) 7232 v.AuxInt = off1 + off2 7233 
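// Offset folding in action: the ADDQconst's constant is absorbed into
// the load's displacement, so (MOVLatomicload [off1] (ADDQconst [off2]
// ptr) mem) becomes a single MOVLatomicload at off1+off2 — folding the
// address arithmetic does not change the memory operation itself, so
// atomicity is preserved. The is32Bit(off1+off2) guard above is what
// makes this safe: x86-64 addressing modes carry only a signed 32-bit
// displacement, so the combined offset must still fit in an int32. The
// helper is essentially a round-trip-through-int32 check, roughly
// (sketch of the idea, not quoted from this package):
//
//	func fitsInInt32(n int64) bool {
//		return n == int64(int32(n))
//	}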
v.Aux = sym 7234 v.AddArg(ptr) 7235 v.AddArg(mem) 7236 return true 7237 } 7238 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7239 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7240 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7241 for { 7242 off1 := v.AuxInt 7243 sym1 := v.Aux 7244 _ = v.Args[1] 7245 v_0 := v.Args[0] 7246 if v_0.Op != OpAMD64LEAQ { 7247 break 7248 } 7249 off2 := v_0.AuxInt 7250 sym2 := v_0.Aux 7251 base := v_0.Args[0] 7252 mem := v.Args[1] 7253 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7254 break 7255 } 7256 v.reset(OpAMD64MOVLload) 7257 v.AuxInt = off1 + off2 7258 v.Aux = mergeSym(sym1, sym2) 7259 v.AddArg(base) 7260 v.AddArg(mem) 7261 return true 7262 } 7263 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7264 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7265 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7266 for { 7267 off1 := v.AuxInt 7268 sym1 := v.Aux 7269 _ = v.Args[1] 7270 v_0 := v.Args[0] 7271 if v_0.Op != OpAMD64LEAQ1 { 7272 break 7273 } 7274 off2 := v_0.AuxInt 7275 sym2 := v_0.Aux 7276 _ = v_0.Args[1] 7277 ptr := v_0.Args[0] 7278 idx := v_0.Args[1] 7279 mem := v.Args[1] 7280 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7281 break 7282 } 7283 v.reset(OpAMD64MOVLloadidx1) 7284 v.AuxInt = off1 + off2 7285 v.Aux = mergeSym(sym1, sym2) 7286 v.AddArg(ptr) 7287 v.AddArg(idx) 7288 v.AddArg(mem) 7289 return true 7290 } 7291 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 7292 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7293 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7294 for { 7295 off1 := v.AuxInt 7296 sym1 := v.Aux 7297 _ = v.Args[1] 7298 v_0 := v.Args[0] 7299 if v_0.Op != OpAMD64LEAQ4 { 7300 break 7301 } 7302 off2 := v_0.AuxInt 7303 sym2 := v_0.Aux 7304 _ = v_0.Args[1] 7305 ptr := v_0.Args[0] 7306 idx := v_0.Args[1] 7307 mem := v.Args[1] 7308 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7309 break 7310 } 7311 v.reset(OpAMD64MOVLloadidx4) 7312 v.AuxInt = off1 + off2 7313 v.Aux = mergeSym(sym1, sym2) 7314 v.AddArg(ptr) 7315 v.AddArg(idx) 7316 v.AddArg(mem) 7317 return true 7318 } 7319 // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 7320 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7321 // result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7322 for { 7323 off1 := v.AuxInt 7324 sym1 := v.Aux 7325 _ = v.Args[1] 7326 v_0 := v.Args[0] 7327 if v_0.Op != OpAMD64LEAQ8 { 7328 break 7329 } 7330 off2 := v_0.AuxInt 7331 sym2 := v_0.Aux 7332 _ = v_0.Args[1] 7333 ptr := v_0.Args[0] 7334 idx := v_0.Args[1] 7335 mem := v.Args[1] 7336 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7337 break 7338 } 7339 v.reset(OpAMD64MOVLloadidx8) 7340 v.AuxInt = off1 + off2 7341 v.Aux = mergeSym(sym1, sym2) 7342 v.AddArg(ptr) 7343 v.AddArg(idx) 7344 v.AddArg(mem) 7345 return true 7346 } 7347 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 7348 // cond: ptr.Op != OpSB 7349 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 7350 for { 7351 off := v.AuxInt 7352 sym := v.Aux 7353 _ = v.Args[1] 7354 v_0 := v.Args[0] 7355 if v_0.Op != OpAMD64ADDQ { 7356 break 7357 } 7358 _ = v_0.Args[1] 7359 ptr := v_0.Args[0] 7360 idx := v_0.Args[1] 7361 mem := v.Args[1] 7362 if !(ptr.Op != OpSB) { 7363 break 7364 } 7365 v.reset(OpAMD64MOVLloadidx1) 7366 v.AuxInt = off 7367 v.Aux = sym 7368 v.AddArg(ptr) 7369 v.AddArg(idx) 7370 v.AddArg(mem) 7371 return true 7372 } 7373 // 
match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 7374 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 7375 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7376 for { 7377 off1 := v.AuxInt 7378 sym1 := v.Aux 7379 _ = v.Args[1] 7380 v_0 := v.Args[0] 7381 if v_0.Op != OpAMD64LEAL { 7382 break 7383 } 7384 off2 := v_0.AuxInt 7385 sym2 := v_0.Aux 7386 base := v_0.Args[0] 7387 mem := v.Args[1] 7388 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 7389 break 7390 } 7391 v.reset(OpAMD64MOVLload) 7392 v.AuxInt = off1 + off2 7393 v.Aux = mergeSym(sym1, sym2) 7394 v.AddArg(base) 7395 v.AddArg(mem) 7396 return true 7397 } 7398 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 7399 // cond: is32Bit(off1+off2) 7400 // result: (MOVLload [off1+off2] {sym} ptr mem) 7401 for { 7402 off1 := v.AuxInt 7403 sym := v.Aux 7404 _ = v.Args[1] 7405 v_0 := v.Args[0] 7406 if v_0.Op != OpAMD64ADDLconst { 7407 break 7408 } 7409 off2 := v_0.AuxInt 7410 ptr := v_0.Args[0] 7411 mem := v.Args[1] 7412 if !(is32Bit(off1 + off2)) { 7413 break 7414 } 7415 v.reset(OpAMD64MOVLload) 7416 v.AuxInt = off1 + off2 7417 v.Aux = sym 7418 v.AddArg(ptr) 7419 v.AddArg(mem) 7420 return true 7421 } 7422 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) 7423 // cond: 7424 // result: (MOVLf2i val) 7425 for { 7426 off := v.AuxInt 7427 sym := v.Aux 7428 _ = v.Args[1] 7429 ptr := v.Args[0] 7430 v_1 := v.Args[1] 7431 if v_1.Op != OpAMD64MOVSSstore { 7432 break 7433 } 7434 if v_1.AuxInt != off { 7435 break 7436 } 7437 if v_1.Aux != sym { 7438 break 7439 } 7440 _ = v_1.Args[2] 7441 if ptr != v_1.Args[0] { 7442 break 7443 } 7444 val := v_1.Args[1] 7445 v.reset(OpAMD64MOVLf2i) 7446 v.AddArg(val) 7447 return true 7448 } 7449 return false 7450 } 7451 func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { 7452 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7453 // cond: 7454 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7455 for { 7456 c := v.AuxInt 7457 sym := v.Aux 7458 _ = v.Args[2] 7459 ptr := v.Args[0] 7460 v_1 := v.Args[1] 7461 if v_1.Op != OpAMD64SHLQconst { 7462 break 7463 } 7464 if v_1.AuxInt != 2 { 7465 break 7466 } 7467 idx := v_1.Args[0] 7468 mem := v.Args[2] 7469 v.reset(OpAMD64MOVLloadidx4) 7470 v.AuxInt = c 7471 v.Aux = sym 7472 v.AddArg(ptr) 7473 v.AddArg(idx) 7474 v.AddArg(mem) 7475 return true 7476 } 7477 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) 7478 // cond: 7479 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7480 for { 7481 c := v.AuxInt 7482 sym := v.Aux 7483 _ = v.Args[2] 7484 v_0 := v.Args[0] 7485 if v_0.Op != OpAMD64SHLQconst { 7486 break 7487 } 7488 if v_0.AuxInt != 2 { 7489 break 7490 } 7491 idx := v_0.Args[0] 7492 ptr := v.Args[1] 7493 mem := v.Args[2] 7494 v.reset(OpAMD64MOVLloadidx4) 7495 v.AuxInt = c 7496 v.Aux = sym 7497 v.AddArg(ptr) 7498 v.AddArg(idx) 7499 v.AddArg(mem) 7500 return true 7501 } 7502 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7503 // cond: 7504 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 7505 for { 7506 c := v.AuxInt 7507 sym := v.Aux 7508 _ = v.Args[2] 7509 ptr := v.Args[0] 7510 v_1 := v.Args[1] 7511 if v_1.Op != OpAMD64SHLQconst { 7512 break 7513 } 7514 if v_1.AuxInt != 3 { 7515 break 7516 } 7517 idx := v_1.Args[0] 7518 mem := v.Args[2] 7519 v.reset(OpAMD64MOVLloadidx8) 7520 v.AuxInt = c 7521 v.Aux = sym 7522 v.AddArg(ptr) 7523 v.AddArg(idx) 7524 v.AddArg(mem) 7525 return true 7526 } 7527 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 7528 // 
cond: 7529 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 7530 for { 7531 c := v.AuxInt 7532 sym := v.Aux 7533 _ = v.Args[2] 7534 v_0 := v.Args[0] 7535 if v_0.Op != OpAMD64SHLQconst { 7536 break 7537 } 7538 if v_0.AuxInt != 3 { 7539 break 7540 } 7541 idx := v_0.Args[0] 7542 ptr := v.Args[1] 7543 mem := v.Args[2] 7544 v.reset(OpAMD64MOVLloadidx8) 7545 v.AuxInt = c 7546 v.Aux = sym 7547 v.AddArg(ptr) 7548 v.AddArg(idx) 7549 v.AddArg(mem) 7550 return true 7551 } 7552 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7553 // cond: is32Bit(c+d) 7554 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7555 for { 7556 c := v.AuxInt 7557 sym := v.Aux 7558 _ = v.Args[2] 7559 v_0 := v.Args[0] 7560 if v_0.Op != OpAMD64ADDQconst { 7561 break 7562 } 7563 d := v_0.AuxInt 7564 ptr := v_0.Args[0] 7565 idx := v.Args[1] 7566 mem := v.Args[2] 7567 if !(is32Bit(c + d)) { 7568 break 7569 } 7570 v.reset(OpAMD64MOVLloadidx1) 7571 v.AuxInt = c + d 7572 v.Aux = sym 7573 v.AddArg(ptr) 7574 v.AddArg(idx) 7575 v.AddArg(mem) 7576 return true 7577 } 7578 // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 7579 // cond: is32Bit(c+d) 7580 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7581 for { 7582 c := v.AuxInt 7583 sym := v.Aux 7584 _ = v.Args[2] 7585 idx := v.Args[0] 7586 v_1 := v.Args[1] 7587 if v_1.Op != OpAMD64ADDQconst { 7588 break 7589 } 7590 d := v_1.AuxInt 7591 ptr := v_1.Args[0] 7592 mem := v.Args[2] 7593 if !(is32Bit(c + d)) { 7594 break 7595 } 7596 v.reset(OpAMD64MOVLloadidx1) 7597 v.AuxInt = c + d 7598 v.Aux = sym 7599 v.AddArg(ptr) 7600 v.AddArg(idx) 7601 v.AddArg(mem) 7602 return true 7603 } 7604 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7605 // cond: is32Bit(c+d) 7606 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7607 for { 7608 c := v.AuxInt 7609 sym := v.Aux 7610 _ = v.Args[2] 7611 ptr := v.Args[0] 7612 v_1 := v.Args[1] 7613 if v_1.Op != OpAMD64ADDQconst { 7614 break 7615 } 7616 d := v_1.AuxInt 7617 idx := v_1.Args[0] 7618 mem := v.Args[2] 7619 if !(is32Bit(c + d)) { 7620 break 7621 } 7622 v.reset(OpAMD64MOVLloadidx1) 7623 v.AuxInt = c + d 7624 v.Aux = sym 7625 v.AddArg(ptr) 7626 v.AddArg(idx) 7627 v.AddArg(mem) 7628 return true 7629 } 7630 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 7631 // cond: is32Bit(c+d) 7632 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7633 for { 7634 c := v.AuxInt 7635 sym := v.Aux 7636 _ = v.Args[2] 7637 v_0 := v.Args[0] 7638 if v_0.Op != OpAMD64ADDQconst { 7639 break 7640 } 7641 d := v_0.AuxInt 7642 idx := v_0.Args[0] 7643 ptr := v.Args[1] 7644 mem := v.Args[2] 7645 if !(is32Bit(c + d)) { 7646 break 7647 } 7648 v.reset(OpAMD64MOVLloadidx1) 7649 v.AuxInt = c + d 7650 v.Aux = sym 7651 v.AddArg(ptr) 7652 v.AddArg(idx) 7653 v.AddArg(mem) 7654 return true 7655 } 7656 return false 7657 } 7658 func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { 7659 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 7660 // cond: is32Bit(c+d) 7661 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 7662 for { 7663 c := v.AuxInt 7664 sym := v.Aux 7665 _ = v.Args[2] 7666 v_0 := v.Args[0] 7667 if v_0.Op != OpAMD64ADDQconst { 7668 break 7669 } 7670 d := v_0.AuxInt 7671 ptr := v_0.Args[0] 7672 idx := v.Args[1] 7673 mem := v.Args[2] 7674 if !(is32Bit(c + d)) { 7675 break 7676 } 7677 v.reset(OpAMD64MOVLloadidx4) 7678 v.AuxInt = c + d 7679 v.Aux = sym 7680 v.AddArg(ptr) 7681 v.AddArg(idx) 7682 v.AddArg(mem) 7683 return true 7684 } 7685 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 7686 // cond: 
is32Bit(c+4*d) 7687 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 7688 for { 7689 c := v.AuxInt 7690 sym := v.Aux 7691 _ = v.Args[2] 7692 ptr := v.Args[0] 7693 v_1 := v.Args[1] 7694 if v_1.Op != OpAMD64ADDQconst { 7695 break 7696 } 7697 d := v_1.AuxInt 7698 idx := v_1.Args[0] 7699 mem := v.Args[2] 7700 if !(is32Bit(c + 4*d)) { 7701 break 7702 } 7703 v.reset(OpAMD64MOVLloadidx4) 7704 v.AuxInt = c + 4*d 7705 v.Aux = sym 7706 v.AddArg(ptr) 7707 v.AddArg(idx) 7708 v.AddArg(mem) 7709 return true 7710 } 7711 return false 7712 } 7713 func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { 7714 // match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 7715 // cond: is32Bit(c+d) 7716 // result: (MOVLloadidx8 [c+d] {sym} ptr idx mem) 7717 for { 7718 c := v.AuxInt 7719 sym := v.Aux 7720 _ = v.Args[2] 7721 v_0 := v.Args[0] 7722 if v_0.Op != OpAMD64ADDQconst { 7723 break 7724 } 7725 d := v_0.AuxInt 7726 ptr := v_0.Args[0] 7727 idx := v.Args[1] 7728 mem := v.Args[2] 7729 if !(is32Bit(c + d)) { 7730 break 7731 } 7732 v.reset(OpAMD64MOVLloadidx8) 7733 v.AuxInt = c + d 7734 v.Aux = sym 7735 v.AddArg(ptr) 7736 v.AddArg(idx) 7737 v.AddArg(mem) 7738 return true 7739 } 7740 // match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 7741 // cond: is32Bit(c+8*d) 7742 // result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem) 7743 for { 7744 c := v.AuxInt 7745 sym := v.Aux 7746 _ = v.Args[2] 7747 ptr := v.Args[0] 7748 v_1 := v.Args[1] 7749 if v_1.Op != OpAMD64ADDQconst { 7750 break 7751 } 7752 d := v_1.AuxInt 7753 idx := v_1.Args[0] 7754 mem := v.Args[2] 7755 if !(is32Bit(c + 8*d)) { 7756 break 7757 } 7758 v.reset(OpAMD64MOVLloadidx8) 7759 v.AuxInt = c + 8*d 7760 v.Aux = sym 7761 v.AddArg(ptr) 7762 v.AddArg(idx) 7763 v.AddArg(mem) 7764 return true 7765 } 7766 return false 7767 } 7768 func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { 7769 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 7770 // cond: 7771 // result: (MOVLstore [off] {sym} ptr x mem) 7772 for { 7773 off := v.AuxInt 7774 sym := v.Aux 7775 _ = v.Args[2] 7776 ptr := v.Args[0] 7777 v_1 := v.Args[1] 7778 if v_1.Op != OpAMD64MOVLQSX { 7779 break 7780 } 7781 x := v_1.Args[0] 7782 mem := v.Args[2] 7783 v.reset(OpAMD64MOVLstore) 7784 v.AuxInt = off 7785 v.Aux = sym 7786 v.AddArg(ptr) 7787 v.AddArg(x) 7788 v.AddArg(mem) 7789 return true 7790 } 7791 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 7792 // cond: 7793 // result: (MOVLstore [off] {sym} ptr x mem) 7794 for { 7795 off := v.AuxInt 7796 sym := v.Aux 7797 _ = v.Args[2] 7798 ptr := v.Args[0] 7799 v_1 := v.Args[1] 7800 if v_1.Op != OpAMD64MOVLQZX { 7801 break 7802 } 7803 x := v_1.Args[0] 7804 mem := v.Args[2] 7805 v.reset(OpAMD64MOVLstore) 7806 v.AuxInt = off 7807 v.Aux = sym 7808 v.AddArg(ptr) 7809 v.AddArg(x) 7810 v.AddArg(mem) 7811 return true 7812 } 7813 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7814 // cond: is32Bit(off1+off2) 7815 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 7816 for { 7817 off1 := v.AuxInt 7818 sym := v.Aux 7819 _ = v.Args[2] 7820 v_0 := v.Args[0] 7821 if v_0.Op != OpAMD64ADDQconst { 7822 break 7823 } 7824 off2 := v_0.AuxInt 7825 ptr := v_0.Args[0] 7826 val := v.Args[1] 7827 mem := v.Args[2] 7828 if !(is32Bit(off1 + off2)) { 7829 break 7830 } 7831 v.reset(OpAMD64MOVLstore) 7832 v.AuxInt = off1 + off2 7833 v.Aux = sym 7834 v.AddArg(ptr) 7835 v.AddArg(val) 7836 v.AddArg(mem) 7837 return true 7838 } 7839 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 7840 // cond: validOff(off) 7841 // result: 
(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 7842 for { 7843 off := v.AuxInt 7844 sym := v.Aux 7845 _ = v.Args[2] 7846 ptr := v.Args[0] 7847 v_1 := v.Args[1] 7848 if v_1.Op != OpAMD64MOVLconst { 7849 break 7850 } 7851 c := v_1.AuxInt 7852 mem := v.Args[2] 7853 if !(validOff(off)) { 7854 break 7855 } 7856 v.reset(OpAMD64MOVLstoreconst) 7857 v.AuxInt = makeValAndOff(int64(int32(c)), off) 7858 v.Aux = sym 7859 v.AddArg(ptr) 7860 v.AddArg(mem) 7861 return true 7862 } 7863 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7864 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7865 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7866 for { 7867 off1 := v.AuxInt 7868 sym1 := v.Aux 7869 _ = v.Args[2] 7870 v_0 := v.Args[0] 7871 if v_0.Op != OpAMD64LEAQ { 7872 break 7873 } 7874 off2 := v_0.AuxInt 7875 sym2 := v_0.Aux 7876 base := v_0.Args[0] 7877 val := v.Args[1] 7878 mem := v.Args[2] 7879 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7880 break 7881 } 7882 v.reset(OpAMD64MOVLstore) 7883 v.AuxInt = off1 + off2 7884 v.Aux = mergeSym(sym1, sym2) 7885 v.AddArg(base) 7886 v.AddArg(val) 7887 v.AddArg(mem) 7888 return true 7889 } 7890 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7891 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7892 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7893 for { 7894 off1 := v.AuxInt 7895 sym1 := v.Aux 7896 _ = v.Args[2] 7897 v_0 := v.Args[0] 7898 if v_0.Op != OpAMD64LEAQ1 { 7899 break 7900 } 7901 off2 := v_0.AuxInt 7902 sym2 := v_0.Aux 7903 _ = v_0.Args[1] 7904 ptr := v_0.Args[0] 7905 idx := v_0.Args[1] 7906 val := v.Args[1] 7907 mem := v.Args[2] 7908 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7909 break 7910 } 7911 v.reset(OpAMD64MOVLstoreidx1) 7912 v.AuxInt = off1 + off2 7913 v.Aux = mergeSym(sym1, sym2) 7914 v.AddArg(ptr) 7915 v.AddArg(idx) 7916 v.AddArg(val) 7917 v.AddArg(mem) 7918 return true 7919 } 7920 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 7921 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7922 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7923 for { 7924 off1 := v.AuxInt 7925 sym1 := v.Aux 7926 _ = v.Args[2] 7927 v_0 := v.Args[0] 7928 if v_0.Op != OpAMD64LEAQ4 { 7929 break 7930 } 7931 off2 := v_0.AuxInt 7932 sym2 := v_0.Aux 7933 _ = v_0.Args[1] 7934 ptr := v_0.Args[0] 7935 idx := v_0.Args[1] 7936 val := v.Args[1] 7937 mem := v.Args[2] 7938 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7939 break 7940 } 7941 v.reset(OpAMD64MOVLstoreidx4) 7942 v.AuxInt = off1 + off2 7943 v.Aux = mergeSym(sym1, sym2) 7944 v.AddArg(ptr) 7945 v.AddArg(idx) 7946 v.AddArg(val) 7947 v.AddArg(mem) 7948 return true 7949 } 7950 // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 7951 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7952 // result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7953 for { 7954 off1 := v.AuxInt 7955 sym1 := v.Aux 7956 _ = v.Args[2] 7957 v_0 := v.Args[0] 7958 if v_0.Op != OpAMD64LEAQ8 { 7959 break 7960 } 7961 off2 := v_0.AuxInt 7962 sym2 := v_0.Aux 7963 _ = v_0.Args[1] 7964 ptr := v_0.Args[0] 7965 idx := v_0.Args[1] 7966 val := v.Args[1] 7967 mem := v.Args[2] 7968 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7969 break 7970 } 7971 v.reset(OpAMD64MOVLstoreidx8) 7972 v.AuxInt = off1 + off2 7973 v.Aux = mergeSym(sym1, sym2) 7974 v.AddArg(ptr) 7975 v.AddArg(idx) 7976 
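// The rewrite above folds the LEAQ8's constant displacement into the
// store's offset; this is sound only while the combined displacement
// still fits in a signed 32-bit immediate, which the is32Bit guard
// checks. A minimal sketch of that predicate, assuming the helper this
// file relies on from rewrite.go in this package:
//
//	func is32Bit(n int64) bool {
//		return n == int64(int32(n))
//	}
//
// Worked instance: MOVLstore [16] {s} (LEAQ8 [4] p i) v m addresses
// p + 8*i + 4 + 16, so it becomes MOVLstoreidx8 [20] {s} p i v m,
// which addresses the same p + 8*i + 20.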
v.AddArg(val) 7977 v.AddArg(mem) 7978 return true 7979 } 7980 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 7981 // cond: ptr.Op != OpSB 7982 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 7983 for { 7984 off := v.AuxInt 7985 sym := v.Aux 7986 _ = v.Args[2] 7987 v_0 := v.Args[0] 7988 if v_0.Op != OpAMD64ADDQ { 7989 break 7990 } 7991 _ = v_0.Args[1] 7992 ptr := v_0.Args[0] 7993 idx := v_0.Args[1] 7994 val := v.Args[1] 7995 mem := v.Args[2] 7996 if !(ptr.Op != OpSB) { 7997 break 7998 } 7999 v.reset(OpAMD64MOVLstoreidx1) 8000 v.AuxInt = off 8001 v.Aux = sym 8002 v.AddArg(ptr) 8003 v.AddArg(idx) 8004 v.AddArg(val) 8005 v.AddArg(mem) 8006 return true 8007 } 8008 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 8009 // cond: x.Uses == 1 && clobber(x) 8010 // result: (MOVQstore [i-4] {s} p w mem) 8011 for { 8012 i := v.AuxInt 8013 s := v.Aux 8014 _ = v.Args[2] 8015 p := v.Args[0] 8016 v_1 := v.Args[1] 8017 if v_1.Op != OpAMD64SHRQconst { 8018 break 8019 } 8020 if v_1.AuxInt != 32 { 8021 break 8022 } 8023 w := v_1.Args[0] 8024 x := v.Args[2] 8025 if x.Op != OpAMD64MOVLstore { 8026 break 8027 } 8028 if x.AuxInt != i-4 { 8029 break 8030 } 8031 if x.Aux != s { 8032 break 8033 } 8034 _ = x.Args[2] 8035 if p != x.Args[0] { 8036 break 8037 } 8038 if w != x.Args[1] { 8039 break 8040 } 8041 mem := x.Args[2] 8042 if !(x.Uses == 1 && clobber(x)) { 8043 break 8044 } 8045 v.reset(OpAMD64MOVQstore) 8046 v.AuxInt = i - 4 8047 v.Aux = s 8048 v.AddArg(p) 8049 v.AddArg(w) 8050 v.AddArg(mem) 8051 return true 8052 } 8053 return false 8054 } 8055 func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { 8056 b := v.Block 8057 _ = b 8058 typ := &b.Func.Config.Types 8059 _ = typ 8060 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 8061 // cond: x.Uses == 1 && clobber(x) 8062 // result: (MOVQstore [i-4] {s} p w0 mem) 8063 for { 8064 i := v.AuxInt 8065 s := v.Aux 8066 _ = v.Args[2] 8067 p := v.Args[0] 8068 v_1 := v.Args[1] 8069 if v_1.Op != OpAMD64SHRQconst { 8070 break 8071 } 8072 j := v_1.AuxInt 8073 w := v_1.Args[0] 8074 x := v.Args[2] 8075 if x.Op != OpAMD64MOVLstore { 8076 break 8077 } 8078 if x.AuxInt != i-4 { 8079 break 8080 } 8081 if x.Aux != s { 8082 break 8083 } 8084 _ = x.Args[2] 8085 if p != x.Args[0] { 8086 break 8087 } 8088 w0 := x.Args[1] 8089 if w0.Op != OpAMD64SHRQconst { 8090 break 8091 } 8092 if w0.AuxInt != j-32 { 8093 break 8094 } 8095 if w != w0.Args[0] { 8096 break 8097 } 8098 mem := x.Args[2] 8099 if !(x.Uses == 1 && clobber(x)) { 8100 break 8101 } 8102 v.reset(OpAMD64MOVQstore) 8103 v.AuxInt = i - 4 8104 v.Aux = s 8105 v.AddArg(p) 8106 v.AddArg(w0) 8107 v.AddArg(mem) 8108 return true 8109 } 8110 // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) 8111 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 8112 // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) 8113 for { 8114 i := v.AuxInt 8115 s := v.Aux 8116 _ = v.Args[2] 8117 p := v.Args[0] 8118 x1 := v.Args[1] 8119 if x1.Op != OpAMD64MOVLload { 8120 break 8121 } 8122 j := x1.AuxInt 8123 s2 := x1.Aux 8124 _ = x1.Args[1] 8125 p2 := x1.Args[0] 8126 mem := x1.Args[1] 8127 mem2 := v.Args[2] 8128 if mem2.Op != OpAMD64MOVLstore { 8129 break 8130 } 8131 if mem2.AuxInt != i-4 { 8132 break 8133 } 8134 if mem2.Aux != s { 8135 break 8136 } 8137 _ = mem2.Args[2] 8138 if p != mem2.Args[0] { 8139 break 8140 } 
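// At this point the pattern has matched the younger 4-byte copy (the
// load x1 feeding the store at [i]) and found an older MOVLstore at
// [i-4] through the same pointer; the checks below confirm the older
// store's value is the adjacent load x2 at [j-4] from the same source
// over the same memory. Because AMD64 is little-endian, the two adjacent
// 4-byte copies are equivalent to one 8-byte copy, so the rule emits a
// single MOVQload/MOVQstore pair. In the file's own rule notation:
//
//	before: MOVLstore [i]   {s} p (MOVLload [j]   {s2} p2 mem)
//	        MOVLstore [i-4] {s} p (MOVLload [j-4] {s2} p2 mem)
//	after:  MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem)
//
// The Uses == 1 and clobber conditions ensure the intermediate loads and
// the older store have no other consumers, so removing them is safe.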
8141 x2 := mem2.Args[1] 8142 if x2.Op != OpAMD64MOVLload { 8143 break 8144 } 8145 if x2.AuxInt != j-4 { 8146 break 8147 } 8148 if x2.Aux != s2 { 8149 break 8150 } 8151 _ = x2.Args[1] 8152 if p2 != x2.Args[0] { 8153 break 8154 } 8155 if mem != x2.Args[1] { 8156 break 8157 } 8158 if mem != mem2.Args[2] { 8159 break 8160 } 8161 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 8162 break 8163 } 8164 v.reset(OpAMD64MOVQstore) 8165 v.AuxInt = i - 4 8166 v.Aux = s 8167 v.AddArg(p) 8168 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 8169 v0.AuxInt = j - 4 8170 v0.Aux = s2 8171 v0.AddArg(p2) 8172 v0.AddArg(mem) 8173 v.AddArg(v0) 8174 v.AddArg(mem) 8175 return true 8176 } 8177 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 8178 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 8179 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8180 for { 8181 off1 := v.AuxInt 8182 sym1 := v.Aux 8183 _ = v.Args[2] 8184 v_0 := v.Args[0] 8185 if v_0.Op != OpAMD64LEAL { 8186 break 8187 } 8188 off2 := v_0.AuxInt 8189 sym2 := v_0.Aux 8190 base := v_0.Args[0] 8191 val := v.Args[1] 8192 mem := v.Args[2] 8193 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 8194 break 8195 } 8196 v.reset(OpAMD64MOVLstore) 8197 v.AuxInt = off1 + off2 8198 v.Aux = mergeSym(sym1, sym2) 8199 v.AddArg(base) 8200 v.AddArg(val) 8201 v.AddArg(mem) 8202 return true 8203 } 8204 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 8205 // cond: is32Bit(off1+off2) 8206 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 8207 for { 8208 off1 := v.AuxInt 8209 sym := v.Aux 8210 _ = v.Args[2] 8211 v_0 := v.Args[0] 8212 if v_0.Op != OpAMD64ADDLconst { 8213 break 8214 } 8215 off2 := v_0.AuxInt 8216 ptr := v_0.Args[0] 8217 val := v.Args[1] 8218 mem := v.Args[2] 8219 if !(is32Bit(off1 + off2)) { 8220 break 8221 } 8222 v.reset(OpAMD64MOVLstore) 8223 v.AuxInt = off1 + off2 8224 v.Aux = sym 8225 v.AddArg(ptr) 8226 v.AddArg(val) 8227 v.AddArg(mem) 8228 return true 8229 } 8230 // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 8231 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) 8232 // result: (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem) 8233 for { 8234 off := v.AuxInt 8235 sym := v.Aux 8236 _ = v.Args[2] 8237 ptr := v.Args[0] 8238 a := v.Args[1] 8239 if a.Op != OpAMD64ADDLconst { 8240 break 8241 } 8242 c := a.AuxInt 8243 l := a.Args[0] 8244 if l.Op != OpAMD64MOVLload { 8245 break 8246 } 8247 if l.AuxInt != off { 8248 break 8249 } 8250 if l.Aux != sym { 8251 break 8252 } 8253 _ = l.Args[1] 8254 ptr2 := l.Args[0] 8255 mem := l.Args[1] 8256 if mem != v.Args[2] { 8257 break 8258 } 8259 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { 8260 break 8261 } 8262 v.reset(OpAMD64ADDLconstmem) 8263 v.AuxInt = makeValAndOff(c, off) 8264 v.Aux = sym 8265 v.AddArg(ptr) 8266 v.AddArg(mem) 8267 return true 8268 } 8269 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) 8270 // cond: 8271 // result: (MOVSSstore [off] {sym} ptr val mem) 8272 for { 8273 off := v.AuxInt 8274 sym := v.Aux 8275 _ = v.Args[2] 8276 ptr := v.Args[0] 8277 v_1 := v.Args[1] 8278 if v_1.Op != OpAMD64MOVLf2i { 8279 break 8280 } 8281 val := v_1.Args[0] 8282 mem := v.Args[2] 8283 v.reset(OpAMD64MOVSSstore) 8284 v.AuxInt = off 8285 v.Aux = sym 8286 v.AddArg(ptr) 8287 v.AddArg(val) 8288 v.AddArg(mem) 8289 return true 8290 } 8291 return false 8292 } 8293 func 
rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { 8294 b := v.Block 8295 _ = b 8296 typ := &b.Func.Config.Types 8297 _ = typ 8298 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 8299 // cond: ValAndOff(sc).canAdd(off) 8300 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8301 for { 8302 sc := v.AuxInt 8303 s := v.Aux 8304 _ = v.Args[1] 8305 v_0 := v.Args[0] 8306 if v_0.Op != OpAMD64ADDQconst { 8307 break 8308 } 8309 off := v_0.AuxInt 8310 ptr := v_0.Args[0] 8311 mem := v.Args[1] 8312 if !(ValAndOff(sc).canAdd(off)) { 8313 break 8314 } 8315 v.reset(OpAMD64MOVLstoreconst) 8316 v.AuxInt = ValAndOff(sc).add(off) 8317 v.Aux = s 8318 v.AddArg(ptr) 8319 v.AddArg(mem) 8320 return true 8321 } 8322 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 8323 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8324 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8325 for { 8326 sc := v.AuxInt 8327 sym1 := v.Aux 8328 _ = v.Args[1] 8329 v_0 := v.Args[0] 8330 if v_0.Op != OpAMD64LEAQ { 8331 break 8332 } 8333 off := v_0.AuxInt 8334 sym2 := v_0.Aux 8335 ptr := v_0.Args[0] 8336 mem := v.Args[1] 8337 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8338 break 8339 } 8340 v.reset(OpAMD64MOVLstoreconst) 8341 v.AuxInt = ValAndOff(sc).add(off) 8342 v.Aux = mergeSym(sym1, sym2) 8343 v.AddArg(ptr) 8344 v.AddArg(mem) 8345 return true 8346 } 8347 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 8348 // cond: canMergeSym(sym1, sym2) 8349 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8350 for { 8351 x := v.AuxInt 8352 sym1 := v.Aux 8353 _ = v.Args[1] 8354 v_0 := v.Args[0] 8355 if v_0.Op != OpAMD64LEAQ1 { 8356 break 8357 } 8358 off := v_0.AuxInt 8359 sym2 := v_0.Aux 8360 _ = v_0.Args[1] 8361 ptr := v_0.Args[0] 8362 idx := v_0.Args[1] 8363 mem := v.Args[1] 8364 if !(canMergeSym(sym1, sym2)) { 8365 break 8366 } 8367 v.reset(OpAMD64MOVLstoreconstidx1) 8368 v.AuxInt = ValAndOff(x).add(off) 8369 v.Aux = mergeSym(sym1, sym2) 8370 v.AddArg(ptr) 8371 v.AddArg(idx) 8372 v.AddArg(mem) 8373 return true 8374 } 8375 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 8376 // cond: canMergeSym(sym1, sym2) 8377 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8378 for { 8379 x := v.AuxInt 8380 sym1 := v.Aux 8381 _ = v.Args[1] 8382 v_0 := v.Args[0] 8383 if v_0.Op != OpAMD64LEAQ4 { 8384 break 8385 } 8386 off := v_0.AuxInt 8387 sym2 := v_0.Aux 8388 _ = v_0.Args[1] 8389 ptr := v_0.Args[0] 8390 idx := v_0.Args[1] 8391 mem := v.Args[1] 8392 if !(canMergeSym(sym1, sym2)) { 8393 break 8394 } 8395 v.reset(OpAMD64MOVLstoreconstidx4) 8396 v.AuxInt = ValAndOff(x).add(off) 8397 v.Aux = mergeSym(sym1, sym2) 8398 v.AddArg(ptr) 8399 v.AddArg(idx) 8400 v.AddArg(mem) 8401 return true 8402 } 8403 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 8404 // cond: 8405 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 8406 for { 8407 x := v.AuxInt 8408 sym := v.Aux 8409 _ = v.Args[1] 8410 v_0 := v.Args[0] 8411 if v_0.Op != OpAMD64ADDQ { 8412 break 8413 } 8414 _ = v_0.Args[1] 8415 ptr := v_0.Args[0] 8416 idx := v_0.Args[1] 8417 mem := v.Args[1] 8418 v.reset(OpAMD64MOVLstoreconstidx1) 8419 v.AuxInt = x 8420 v.Aux = sym 8421 v.AddArg(ptr) 8422 v.AddArg(idx) 8423 v.AddArg(mem) 8424 return true 8425 } 8426 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 8427 // cond: x.Uses == 1 && ValAndOff(a).Off() 
+ 4 == ValAndOff(c).Off() && clobber(x) 8428 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 8429 for { 8430 c := v.AuxInt 8431 s := v.Aux 8432 _ = v.Args[1] 8433 p := v.Args[0] 8434 x := v.Args[1] 8435 if x.Op != OpAMD64MOVLstoreconst { 8436 break 8437 } 8438 a := x.AuxInt 8439 if x.Aux != s { 8440 break 8441 } 8442 _ = x.Args[1] 8443 if p != x.Args[0] { 8444 break 8445 } 8446 mem := x.Args[1] 8447 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 8448 break 8449 } 8450 v.reset(OpAMD64MOVQstore) 8451 v.AuxInt = ValAndOff(a).Off() 8452 v.Aux = s 8453 v.AddArg(p) 8454 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 8455 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 8456 v.AddArg(v0) 8457 v.AddArg(mem) 8458 return true 8459 } 8460 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 8461 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8462 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8463 for { 8464 sc := v.AuxInt 8465 sym1 := v.Aux 8466 _ = v.Args[1] 8467 v_0 := v.Args[0] 8468 if v_0.Op != OpAMD64LEAL { 8469 break 8470 } 8471 off := v_0.AuxInt 8472 sym2 := v_0.Aux 8473 ptr := v_0.Args[0] 8474 mem := v.Args[1] 8475 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8476 break 8477 } 8478 v.reset(OpAMD64MOVLstoreconst) 8479 v.AuxInt = ValAndOff(sc).add(off) 8480 v.Aux = mergeSym(sym1, sym2) 8481 v.AddArg(ptr) 8482 v.AddArg(mem) 8483 return true 8484 } 8485 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 8486 // cond: ValAndOff(sc).canAdd(off) 8487 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8488 for { 8489 sc := v.AuxInt 8490 s := v.Aux 8491 _ = v.Args[1] 8492 v_0 := v.Args[0] 8493 if v_0.Op != OpAMD64ADDLconst { 8494 break 8495 } 8496 off := v_0.AuxInt 8497 ptr := v_0.Args[0] 8498 mem := v.Args[1] 8499 if !(ValAndOff(sc).canAdd(off)) { 8500 break 8501 } 8502 v.reset(OpAMD64MOVLstoreconst) 8503 v.AuxInt = ValAndOff(sc).add(off) 8504 v.Aux = s 8505 v.AddArg(ptr) 8506 v.AddArg(mem) 8507 return true 8508 } 8509 return false 8510 } 8511 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { 8512 b := v.Block 8513 _ = b 8514 typ := &b.Func.Config.Types 8515 _ = typ 8516 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 8517 // cond: 8518 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 8519 for { 8520 c := v.AuxInt 8521 sym := v.Aux 8522 _ = v.Args[2] 8523 ptr := v.Args[0] 8524 v_1 := v.Args[1] 8525 if v_1.Op != OpAMD64SHLQconst { 8526 break 8527 } 8528 if v_1.AuxInt != 2 { 8529 break 8530 } 8531 idx := v_1.Args[0] 8532 mem := v.Args[2] 8533 v.reset(OpAMD64MOVLstoreconstidx4) 8534 v.AuxInt = c 8535 v.Aux = sym 8536 v.AddArg(ptr) 8537 v.AddArg(idx) 8538 v.AddArg(mem) 8539 return true 8540 } 8541 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 8542 // cond: ValAndOff(x).canAdd(c) 8543 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8544 for { 8545 x := v.AuxInt 8546 sym := v.Aux 8547 _ = v.Args[2] 8548 v_0 := v.Args[0] 8549 if v_0.Op != OpAMD64ADDQconst { 8550 break 8551 } 8552 c := v_0.AuxInt 8553 ptr := v_0.Args[0] 8554 idx := v.Args[1] 8555 mem := v.Args[2] 8556 if !(ValAndOff(x).canAdd(c)) { 8557 break 8558 } 8559 v.reset(OpAMD64MOVLstoreconstidx1) 8560 v.AuxInt = ValAndOff(x).add(c) 8561 v.Aux = sym 8562 v.AddArg(ptr) 8563 v.AddArg(idx) 8564 v.AddArg(mem) 8565 return true 
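// The ValAndOff(x).add(c) above relies on AuxInt packing both the
// stored constant and the offset into one int64. A sketch of the
// packing, assuming the ValAndOff helpers defined elsewhere in this
// package:
//
//	type ValAndOff int64
//
//	func (x ValAndOff) Val() int64 { return int64(x) >> 32 }   // high 32 bits
//	func (x ValAndOff) Off() int64 { return int64(int32(x)) }  // low 32 bits, sign-extended
//
//	func (x ValAndOff) canAdd(off int64) bool {
//		newoff := x.Off() + off
//		return newoff == int64(int32(newoff)) // offset must stay 32-bit
//	}
//
// add(off) then re-packs Val() with Off()+off. The same packing is what
// the constant-combining rules above exploit: merging two 4-byte
// constant stores builds the 8-byte immediate as
// ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32, placing the
// lower address's value in the low half (little-endian).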
8566 } 8567 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 8568 // cond: ValAndOff(x).canAdd(c) 8569 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8570 for { 8571 x := v.AuxInt 8572 sym := v.Aux 8573 _ = v.Args[2] 8574 ptr := v.Args[0] 8575 v_1 := v.Args[1] 8576 if v_1.Op != OpAMD64ADDQconst { 8577 break 8578 } 8579 c := v_1.AuxInt 8580 idx := v_1.Args[0] 8581 mem := v.Args[2] 8582 if !(ValAndOff(x).canAdd(c)) { 8583 break 8584 } 8585 v.reset(OpAMD64MOVLstoreconstidx1) 8586 v.AuxInt = ValAndOff(x).add(c) 8587 v.Aux = sym 8588 v.AddArg(ptr) 8589 v.AddArg(idx) 8590 v.AddArg(mem) 8591 return true 8592 } 8593 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 8594 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 8595 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 8596 for { 8597 c := v.AuxInt 8598 s := v.Aux 8599 _ = v.Args[2] 8600 p := v.Args[0] 8601 i := v.Args[1] 8602 x := v.Args[2] 8603 if x.Op != OpAMD64MOVLstoreconstidx1 { 8604 break 8605 } 8606 a := x.AuxInt 8607 if x.Aux != s { 8608 break 8609 } 8610 _ = x.Args[2] 8611 if p != x.Args[0] { 8612 break 8613 } 8614 if i != x.Args[1] { 8615 break 8616 } 8617 mem := x.Args[2] 8618 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 8619 break 8620 } 8621 v.reset(OpAMD64MOVQstoreidx1) 8622 v.AuxInt = ValAndOff(a).Off() 8623 v.Aux = s 8624 v.AddArg(p) 8625 v.AddArg(i) 8626 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 8627 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 8628 v.AddArg(v0) 8629 v.AddArg(mem) 8630 return true 8631 } 8632 return false 8633 } 8634 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { 8635 b := v.Block 8636 _ = b 8637 typ := &b.Func.Config.Types 8638 _ = typ 8639 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 8640 // cond: ValAndOff(x).canAdd(c) 8641 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8642 for { 8643 x := v.AuxInt 8644 sym := v.Aux 8645 _ = v.Args[2] 8646 v_0 := v.Args[0] 8647 if v_0.Op != OpAMD64ADDQconst { 8648 break 8649 } 8650 c := v_0.AuxInt 8651 ptr := v_0.Args[0] 8652 idx := v.Args[1] 8653 mem := v.Args[2] 8654 if !(ValAndOff(x).canAdd(c)) { 8655 break 8656 } 8657 v.reset(OpAMD64MOVLstoreconstidx4) 8658 v.AuxInt = ValAndOff(x).add(c) 8659 v.Aux = sym 8660 v.AddArg(ptr) 8661 v.AddArg(idx) 8662 v.AddArg(mem) 8663 return true 8664 } 8665 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 8666 // cond: ValAndOff(x).canAdd(4*c) 8667 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 8668 for { 8669 x := v.AuxInt 8670 sym := v.Aux 8671 _ = v.Args[2] 8672 ptr := v.Args[0] 8673 v_1 := v.Args[1] 8674 if v_1.Op != OpAMD64ADDQconst { 8675 break 8676 } 8677 c := v_1.AuxInt 8678 idx := v_1.Args[0] 8679 mem := v.Args[2] 8680 if !(ValAndOff(x).canAdd(4 * c)) { 8681 break 8682 } 8683 v.reset(OpAMD64MOVLstoreconstidx4) 8684 v.AuxInt = ValAndOff(x).add(4 * c) 8685 v.Aux = sym 8686 v.AddArg(ptr) 8687 v.AddArg(idx) 8688 v.AddArg(mem) 8689 return true 8690 } 8691 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 8692 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 8693 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | 
ValAndOff(c).Val()<<32]) mem) 8694 for { 8695 c := v.AuxInt 8696 s := v.Aux 8697 _ = v.Args[2] 8698 p := v.Args[0] 8699 i := v.Args[1] 8700 x := v.Args[2] 8701 if x.Op != OpAMD64MOVLstoreconstidx4 { 8702 break 8703 } 8704 a := x.AuxInt 8705 if x.Aux != s { 8706 break 8707 } 8708 _ = x.Args[2] 8709 if p != x.Args[0] { 8710 break 8711 } 8712 if i != x.Args[1] { 8713 break 8714 } 8715 mem := x.Args[2] 8716 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 8717 break 8718 } 8719 v.reset(OpAMD64MOVQstoreidx1) 8720 v.AuxInt = ValAndOff(a).Off() 8721 v.Aux = s 8722 v.AddArg(p) 8723 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 8724 v0.AuxInt = 2 8725 v0.AddArg(i) 8726 v.AddArg(v0) 8727 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 8728 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 8729 v.AddArg(v1) 8730 v.AddArg(mem) 8731 return true 8732 } 8733 return false 8734 } 8735 func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { 8736 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 8737 // cond: 8738 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 8739 for { 8740 c := v.AuxInt 8741 sym := v.Aux 8742 _ = v.Args[3] 8743 ptr := v.Args[0] 8744 v_1 := v.Args[1] 8745 if v_1.Op != OpAMD64SHLQconst { 8746 break 8747 } 8748 if v_1.AuxInt != 2 { 8749 break 8750 } 8751 idx := v_1.Args[0] 8752 val := v.Args[2] 8753 mem := v.Args[3] 8754 v.reset(OpAMD64MOVLstoreidx4) 8755 v.AuxInt = c 8756 v.Aux = sym 8757 v.AddArg(ptr) 8758 v.AddArg(idx) 8759 v.AddArg(val) 8760 v.AddArg(mem) 8761 return true 8762 } 8763 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 8764 // cond: 8765 // result: (MOVLstoreidx8 [c] {sym} ptr idx val mem) 8766 for { 8767 c := v.AuxInt 8768 sym := v.Aux 8769 _ = v.Args[3] 8770 ptr := v.Args[0] 8771 v_1 := v.Args[1] 8772 if v_1.Op != OpAMD64SHLQconst { 8773 break 8774 } 8775 if v_1.AuxInt != 3 { 8776 break 8777 } 8778 idx := v_1.Args[0] 8779 val := v.Args[2] 8780 mem := v.Args[3] 8781 v.reset(OpAMD64MOVLstoreidx8) 8782 v.AuxInt = c 8783 v.Aux = sym 8784 v.AddArg(ptr) 8785 v.AddArg(idx) 8786 v.AddArg(val) 8787 v.AddArg(mem) 8788 return true 8789 } 8790 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8791 // cond: is32Bit(c+d) 8792 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 8793 for { 8794 c := v.AuxInt 8795 sym := v.Aux 8796 _ = v.Args[3] 8797 v_0 := v.Args[0] 8798 if v_0.Op != OpAMD64ADDQconst { 8799 break 8800 } 8801 d := v_0.AuxInt 8802 ptr := v_0.Args[0] 8803 idx := v.Args[1] 8804 val := v.Args[2] 8805 mem := v.Args[3] 8806 if !(is32Bit(c + d)) { 8807 break 8808 } 8809 v.reset(OpAMD64MOVLstoreidx1) 8810 v.AuxInt = c + d 8811 v.Aux = sym 8812 v.AddArg(ptr) 8813 v.AddArg(idx) 8814 v.AddArg(val) 8815 v.AddArg(mem) 8816 return true 8817 } 8818 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8819 // cond: is32Bit(c+d) 8820 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 8821 for { 8822 c := v.AuxInt 8823 sym := v.Aux 8824 _ = v.Args[3] 8825 ptr := v.Args[0] 8826 v_1 := v.Args[1] 8827 if v_1.Op != OpAMD64ADDQconst { 8828 break 8829 } 8830 d := v_1.AuxInt 8831 idx := v_1.Args[0] 8832 val := v.Args[2] 8833 mem := v.Args[3] 8834 if !(is32Bit(c + d)) { 8835 break 8836 } 8837 v.reset(OpAMD64MOVLstoreidx1) 8838 v.AuxInt = c + d 8839 v.Aux = sym 8840 v.AddArg(ptr) 8841 v.AddArg(idx) 8842 v.AddArg(val) 8843 v.AddArg(mem) 8844 return true 8845 } 8846 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p 
idx w mem)) 8847 // cond: x.Uses == 1 && clobber(x) 8848 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 8849 for { 8850 i := v.AuxInt 8851 s := v.Aux 8852 _ = v.Args[3] 8853 p := v.Args[0] 8854 idx := v.Args[1] 8855 v_2 := v.Args[2] 8856 if v_2.Op != OpAMD64SHRQconst { 8857 break 8858 } 8859 if v_2.AuxInt != 32 { 8860 break 8861 } 8862 w := v_2.Args[0] 8863 x := v.Args[3] 8864 if x.Op != OpAMD64MOVLstoreidx1 { 8865 break 8866 } 8867 if x.AuxInt != i-4 { 8868 break 8869 } 8870 if x.Aux != s { 8871 break 8872 } 8873 _ = x.Args[3] 8874 if p != x.Args[0] { 8875 break 8876 } 8877 if idx != x.Args[1] { 8878 break 8879 } 8880 if w != x.Args[2] { 8881 break 8882 } 8883 mem := x.Args[3] 8884 if !(x.Uses == 1 && clobber(x)) { 8885 break 8886 } 8887 v.reset(OpAMD64MOVQstoreidx1) 8888 v.AuxInt = i - 4 8889 v.Aux = s 8890 v.AddArg(p) 8891 v.AddArg(idx) 8892 v.AddArg(w) 8893 v.AddArg(mem) 8894 return true 8895 } 8896 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 8897 // cond: x.Uses == 1 && clobber(x) 8898 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 8899 for { 8900 i := v.AuxInt 8901 s := v.Aux 8902 _ = v.Args[3] 8903 p := v.Args[0] 8904 idx := v.Args[1] 8905 v_2 := v.Args[2] 8906 if v_2.Op != OpAMD64SHRQconst { 8907 break 8908 } 8909 j := v_2.AuxInt 8910 w := v_2.Args[0] 8911 x := v.Args[3] 8912 if x.Op != OpAMD64MOVLstoreidx1 { 8913 break 8914 } 8915 if x.AuxInt != i-4 { 8916 break 8917 } 8918 if x.Aux != s { 8919 break 8920 } 8921 _ = x.Args[3] 8922 if p != x.Args[0] { 8923 break 8924 } 8925 if idx != x.Args[1] { 8926 break 8927 } 8928 w0 := x.Args[2] 8929 if w0.Op != OpAMD64SHRQconst { 8930 break 8931 } 8932 if w0.AuxInt != j-32 { 8933 break 8934 } 8935 if w != w0.Args[0] { 8936 break 8937 } 8938 mem := x.Args[3] 8939 if !(x.Uses == 1 && clobber(x)) { 8940 break 8941 } 8942 v.reset(OpAMD64MOVQstoreidx1) 8943 v.AuxInt = i - 4 8944 v.Aux = s 8945 v.AddArg(p) 8946 v.AddArg(idx) 8947 v.AddArg(w0) 8948 v.AddArg(mem) 8949 return true 8950 } 8951 return false 8952 } 8953 func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { 8954 b := v.Block 8955 _ = b 8956 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8957 // cond: is32Bit(c+d) 8958 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 8959 for { 8960 c := v.AuxInt 8961 sym := v.Aux 8962 _ = v.Args[3] 8963 v_0 := v.Args[0] 8964 if v_0.Op != OpAMD64ADDQconst { 8965 break 8966 } 8967 d := v_0.AuxInt 8968 ptr := v_0.Args[0] 8969 idx := v.Args[1] 8970 val := v.Args[2] 8971 mem := v.Args[3] 8972 if !(is32Bit(c + d)) { 8973 break 8974 } 8975 v.reset(OpAMD64MOVLstoreidx4) 8976 v.AuxInt = c + d 8977 v.Aux = sym 8978 v.AddArg(ptr) 8979 v.AddArg(idx) 8980 v.AddArg(val) 8981 v.AddArg(mem) 8982 return true 8983 } 8984 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8985 // cond: is32Bit(c+4*d) 8986 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 8987 for { 8988 c := v.AuxInt 8989 sym := v.Aux 8990 _ = v.Args[3] 8991 ptr := v.Args[0] 8992 v_1 := v.Args[1] 8993 if v_1.Op != OpAMD64ADDQconst { 8994 break 8995 } 8996 d := v_1.AuxInt 8997 idx := v_1.Args[0] 8998 val := v.Args[2] 8999 mem := v.Args[3] 9000 if !(is32Bit(c + 4*d)) { 9001 break 9002 } 9003 v.reset(OpAMD64MOVLstoreidx4) 9004 v.AuxInt = c + 4*d 9005 v.Aux = sym 9006 v.AddArg(ptr) 9007 v.AddArg(idx) 9008 v.AddArg(val) 9009 v.AddArg(mem) 9010 return true 9011 } 9012 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 9013 // 
cond: x.Uses == 1 && clobber(x) 9014 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 9015 for { 9016 i := v.AuxInt 9017 s := v.Aux 9018 _ = v.Args[3] 9019 p := v.Args[0] 9020 idx := v.Args[1] 9021 v_2 := v.Args[2] 9022 if v_2.Op != OpAMD64SHRQconst { 9023 break 9024 } 9025 if v_2.AuxInt != 32 { 9026 break 9027 } 9028 w := v_2.Args[0] 9029 x := v.Args[3] 9030 if x.Op != OpAMD64MOVLstoreidx4 { 9031 break 9032 } 9033 if x.AuxInt != i-4 { 9034 break 9035 } 9036 if x.Aux != s { 9037 break 9038 } 9039 _ = x.Args[3] 9040 if p != x.Args[0] { 9041 break 9042 } 9043 if idx != x.Args[1] { 9044 break 9045 } 9046 if w != x.Args[2] { 9047 break 9048 } 9049 mem := x.Args[3] 9050 if !(x.Uses == 1 && clobber(x)) { 9051 break 9052 } 9053 v.reset(OpAMD64MOVQstoreidx1) 9054 v.AuxInt = i - 4 9055 v.Aux = s 9056 v.AddArg(p) 9057 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 9058 v0.AuxInt = 2 9059 v0.AddArg(idx) 9060 v.AddArg(v0) 9061 v.AddArg(w) 9062 v.AddArg(mem) 9063 return true 9064 } 9065 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 9066 // cond: x.Uses == 1 && clobber(x) 9067 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 9068 for { 9069 i := v.AuxInt 9070 s := v.Aux 9071 _ = v.Args[3] 9072 p := v.Args[0] 9073 idx := v.Args[1] 9074 v_2 := v.Args[2] 9075 if v_2.Op != OpAMD64SHRQconst { 9076 break 9077 } 9078 j := v_2.AuxInt 9079 w := v_2.Args[0] 9080 x := v.Args[3] 9081 if x.Op != OpAMD64MOVLstoreidx4 { 9082 break 9083 } 9084 if x.AuxInt != i-4 { 9085 break 9086 } 9087 if x.Aux != s { 9088 break 9089 } 9090 _ = x.Args[3] 9091 if p != x.Args[0] { 9092 break 9093 } 9094 if idx != x.Args[1] { 9095 break 9096 } 9097 w0 := x.Args[2] 9098 if w0.Op != OpAMD64SHRQconst { 9099 break 9100 } 9101 if w0.AuxInt != j-32 { 9102 break 9103 } 9104 if w != w0.Args[0] { 9105 break 9106 } 9107 mem := x.Args[3] 9108 if !(x.Uses == 1 && clobber(x)) { 9109 break 9110 } 9111 v.reset(OpAMD64MOVQstoreidx1) 9112 v.AuxInt = i - 4 9113 v.Aux = s 9114 v.AddArg(p) 9115 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 9116 v0.AuxInt = 2 9117 v0.AddArg(idx) 9118 v.AddArg(v0) 9119 v.AddArg(w0) 9120 v.AddArg(mem) 9121 return true 9122 } 9123 return false 9124 } 9125 func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { 9126 // match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9127 // cond: is32Bit(c+d) 9128 // result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem) 9129 for { 9130 c := v.AuxInt 9131 sym := v.Aux 9132 _ = v.Args[3] 9133 v_0 := v.Args[0] 9134 if v_0.Op != OpAMD64ADDQconst { 9135 break 9136 } 9137 d := v_0.AuxInt 9138 ptr := v_0.Args[0] 9139 idx := v.Args[1] 9140 val := v.Args[2] 9141 mem := v.Args[3] 9142 if !(is32Bit(c + d)) { 9143 break 9144 } 9145 v.reset(OpAMD64MOVLstoreidx8) 9146 v.AuxInt = c + d 9147 v.Aux = sym 9148 v.AddArg(ptr) 9149 v.AddArg(idx) 9150 v.AddArg(val) 9151 v.AddArg(mem) 9152 return true 9153 } 9154 // match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9155 // cond: is32Bit(c+8*d) 9156 // result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem) 9157 for { 9158 c := v.AuxInt 9159 sym := v.Aux 9160 _ = v.Args[3] 9161 ptr := v.Args[0] 9162 v_1 := v.Args[1] 9163 if v_1.Op != OpAMD64ADDQconst { 9164 break 9165 } 9166 d := v_1.AuxInt 9167 idx := v_1.Args[0] 9168 val := v.Args[2] 9169 mem := v.Args[3] 9170 if !(is32Bit(c + 8*d)) { 9171 break 9172 } 9173 v.reset(OpAMD64MOVLstoreidx8) 9174 v.AuxInt = c + 8*d 9175 v.Aux = sym 9176 
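// Note the c+8*d above: the folded ADDQconst was applied to the index
// of a scale-8 operation, so the constant contributes 8*d bytes to the
// effective address: p + 8*(i+d) + c = p + 8*i + (c + 8*d). The is32Bit
// guard re-checks that the scaled displacement still fits in 32 bits.
// Worked instance: MOVLstoreidx8 [4] {s} p (ADDQconst [2] i) v m
// becomes MOVLstoreidx8 [20] {s} p i v m.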
v.AddArg(ptr) 9177 v.AddArg(idx) 9178 v.AddArg(val) 9179 v.AddArg(mem) 9180 return true 9181 } 9182 return false 9183 } 9184 func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { 9185 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 9186 // cond: is32Bit(off1+off2) 9187 // result: (MOVOload [off1+off2] {sym} ptr mem) 9188 for { 9189 off1 := v.AuxInt 9190 sym := v.Aux 9191 _ = v.Args[1] 9192 v_0 := v.Args[0] 9193 if v_0.Op != OpAMD64ADDQconst { 9194 break 9195 } 9196 off2 := v_0.AuxInt 9197 ptr := v_0.Args[0] 9198 mem := v.Args[1] 9199 if !(is32Bit(off1 + off2)) { 9200 break 9201 } 9202 v.reset(OpAMD64MOVOload) 9203 v.AuxInt = off1 + off2 9204 v.Aux = sym 9205 v.AddArg(ptr) 9206 v.AddArg(mem) 9207 return true 9208 } 9209 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9210 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9211 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9212 for { 9213 off1 := v.AuxInt 9214 sym1 := v.Aux 9215 _ = v.Args[1] 9216 v_0 := v.Args[0] 9217 if v_0.Op != OpAMD64LEAQ { 9218 break 9219 } 9220 off2 := v_0.AuxInt 9221 sym2 := v_0.Aux 9222 base := v_0.Args[0] 9223 mem := v.Args[1] 9224 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9225 break 9226 } 9227 v.reset(OpAMD64MOVOload) 9228 v.AuxInt = off1 + off2 9229 v.Aux = mergeSym(sym1, sym2) 9230 v.AddArg(base) 9231 v.AddArg(mem) 9232 return true 9233 } 9234 return false 9235 } 9236 func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { 9237 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9238 // cond: is32Bit(off1+off2) 9239 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 9240 for { 9241 off1 := v.AuxInt 9242 sym := v.Aux 9243 _ = v.Args[2] 9244 v_0 := v.Args[0] 9245 if v_0.Op != OpAMD64ADDQconst { 9246 break 9247 } 9248 off2 := v_0.AuxInt 9249 ptr := v_0.Args[0] 9250 val := v.Args[1] 9251 mem := v.Args[2] 9252 if !(is32Bit(off1 + off2)) { 9253 break 9254 } 9255 v.reset(OpAMD64MOVOstore) 9256 v.AuxInt = off1 + off2 9257 v.Aux = sym 9258 v.AddArg(ptr) 9259 v.AddArg(val) 9260 v.AddArg(mem) 9261 return true 9262 } 9263 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9264 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9265 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9266 for { 9267 off1 := v.AuxInt 9268 sym1 := v.Aux 9269 _ = v.Args[2] 9270 v_0 := v.Args[0] 9271 if v_0.Op != OpAMD64LEAQ { 9272 break 9273 } 9274 off2 := v_0.AuxInt 9275 sym2 := v_0.Aux 9276 base := v_0.Args[0] 9277 val := v.Args[1] 9278 mem := v.Args[2] 9279 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9280 break 9281 } 9282 v.reset(OpAMD64MOVOstore) 9283 v.AuxInt = off1 + off2 9284 v.Aux = mergeSym(sym1, sym2) 9285 v.AddArg(base) 9286 v.AddArg(val) 9287 v.AddArg(mem) 9288 return true 9289 } 9290 return false 9291 } 9292 func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { 9293 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 9294 // cond: is32Bit(off1+off2) 9295 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 9296 for { 9297 off1 := v.AuxInt 9298 sym := v.Aux 9299 _ = v.Args[1] 9300 v_0 := v.Args[0] 9301 if v_0.Op != OpAMD64ADDQconst { 9302 break 9303 } 9304 off2 := v_0.AuxInt 9305 ptr := v_0.Args[0] 9306 mem := v.Args[1] 9307 if !(is32Bit(off1 + off2)) { 9308 break 9309 } 9310 v.reset(OpAMD64MOVQatomicload) 9311 v.AuxInt = off1 + off2 9312 v.Aux = sym 9313 v.AddArg(ptr) 9314 v.AddArg(mem) 9315 return true 9316 } 9317 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] 
{sym2} ptr) mem) 9318 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9319 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 9320 for { 9321 off1 := v.AuxInt 9322 sym1 := v.Aux 9323 _ = v.Args[1] 9324 v_0 := v.Args[0] 9325 if v_0.Op != OpAMD64LEAQ { 9326 break 9327 } 9328 off2 := v_0.AuxInt 9329 sym2 := v_0.Aux 9330 ptr := v_0.Args[0] 9331 mem := v.Args[1] 9332 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9333 break 9334 } 9335 v.reset(OpAMD64MOVQatomicload) 9336 v.AuxInt = off1 + off2 9337 v.Aux = mergeSym(sym1, sym2) 9338 v.AddArg(ptr) 9339 v.AddArg(mem) 9340 return true 9341 } 9342 return false 9343 } 9344 func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool { 9345 b := v.Block 9346 _ = b 9347 // match: (MOVQf2i <t> (Arg [off] {sym})) 9348 // cond: 9349 // result: @b.Func.Entry (Arg <t> [off] {sym}) 9350 for { 9351 t := v.Type 9352 v_0 := v.Args[0] 9353 if v_0.Op != OpArg { 9354 break 9355 } 9356 off := v_0.AuxInt 9357 sym := v_0.Aux 9358 b = b.Func.Entry 9359 v0 := b.NewValue0(v.Pos, OpArg, t) 9360 v.reset(OpCopy) 9361 v.AddArg(v0) 9362 v0.AuxInt = off 9363 v0.Aux = sym 9364 return true 9365 } 9366 return false 9367 } 9368 func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { 9369 b := v.Block 9370 _ = b 9371 // match: (MOVQi2f <t> (Arg [off] {sym})) 9372 // cond: 9373 // result: @b.Func.Entry (Arg <t> [off] {sym}) 9374 for { 9375 t := v.Type 9376 v_0 := v.Args[0] 9377 if v_0.Op != OpArg { 9378 break 9379 } 9380 off := v_0.AuxInt 9381 sym := v_0.Aux 9382 b = b.Func.Entry 9383 v0 := b.NewValue0(v.Pos, OpArg, t) 9384 v.reset(OpCopy) 9385 v.AddArg(v0) 9386 v0.AuxInt = off 9387 v0.Aux = sym 9388 return true 9389 } 9390 return false 9391 } 9392 func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { 9393 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 9394 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 9395 // result: x 9396 for { 9397 off := v.AuxInt 9398 sym := v.Aux 9399 _ = v.Args[1] 9400 ptr := v.Args[0] 9401 v_1 := v.Args[1] 9402 if v_1.Op != OpAMD64MOVQstore { 9403 break 9404 } 9405 off2 := v_1.AuxInt 9406 sym2 := v_1.Aux 9407 _ = v_1.Args[2] 9408 ptr2 := v_1.Args[0] 9409 x := v_1.Args[1] 9410 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 9411 break 9412 } 9413 v.reset(OpCopy) 9414 v.Type = x.Type 9415 v.AddArg(x) 9416 return true 9417 } 9418 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 9419 // cond: is32Bit(off1+off2) 9420 // result: (MOVQload [off1+off2] {sym} ptr mem) 9421 for { 9422 off1 := v.AuxInt 9423 sym := v.Aux 9424 _ = v.Args[1] 9425 v_0 := v.Args[0] 9426 if v_0.Op != OpAMD64ADDQconst { 9427 break 9428 } 9429 off2 := v_0.AuxInt 9430 ptr := v_0.Args[0] 9431 mem := v.Args[1] 9432 if !(is32Bit(off1 + off2)) { 9433 break 9434 } 9435 v.reset(OpAMD64MOVQload) 9436 v.AuxInt = off1 + off2 9437 v.Aux = sym 9438 v.AddArg(ptr) 9439 v.AddArg(mem) 9440 return true 9441 } 9442 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9443 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9444 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9445 for { 9446 off1 := v.AuxInt 9447 sym1 := v.Aux 9448 _ = v.Args[1] 9449 v_0 := v.Args[0] 9450 if v_0.Op != OpAMD64LEAQ { 9451 break 9452 } 9453 off2 := v_0.AuxInt 9454 sym2 := v_0.Aux 9455 base := v_0.Args[0] 9456 mem := v.Args[1] 9457 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9458 break 9459 } 9460 v.reset(OpAMD64MOVQload) 9461 v.AuxInt = off1 + off2 9462 v.Aux = mergeSym(sym1, sym2) 9463 
v.AddArg(base) 9464 v.AddArg(mem) 9465 return true 9466 } 9467 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9468 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9469 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9470 for { 9471 off1 := v.AuxInt 9472 sym1 := v.Aux 9473 _ = v.Args[1] 9474 v_0 := v.Args[0] 9475 if v_0.Op != OpAMD64LEAQ1 { 9476 break 9477 } 9478 off2 := v_0.AuxInt 9479 sym2 := v_0.Aux 9480 _ = v_0.Args[1] 9481 ptr := v_0.Args[0] 9482 idx := v_0.Args[1] 9483 mem := v.Args[1] 9484 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9485 break 9486 } 9487 v.reset(OpAMD64MOVQloadidx1) 9488 v.AuxInt = off1 + off2 9489 v.Aux = mergeSym(sym1, sym2) 9490 v.AddArg(ptr) 9491 v.AddArg(idx) 9492 v.AddArg(mem) 9493 return true 9494 } 9495 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 9496 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9497 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9498 for { 9499 off1 := v.AuxInt 9500 sym1 := v.Aux 9501 _ = v.Args[1] 9502 v_0 := v.Args[0] 9503 if v_0.Op != OpAMD64LEAQ8 { 9504 break 9505 } 9506 off2 := v_0.AuxInt 9507 sym2 := v_0.Aux 9508 _ = v_0.Args[1] 9509 ptr := v_0.Args[0] 9510 idx := v_0.Args[1] 9511 mem := v.Args[1] 9512 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9513 break 9514 } 9515 v.reset(OpAMD64MOVQloadidx8) 9516 v.AuxInt = off1 + off2 9517 v.Aux = mergeSym(sym1, sym2) 9518 v.AddArg(ptr) 9519 v.AddArg(idx) 9520 v.AddArg(mem) 9521 return true 9522 } 9523 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 9524 // cond: ptr.Op != OpSB 9525 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 9526 for { 9527 off := v.AuxInt 9528 sym := v.Aux 9529 _ = v.Args[1] 9530 v_0 := v.Args[0] 9531 if v_0.Op != OpAMD64ADDQ { 9532 break 9533 } 9534 _ = v_0.Args[1] 9535 ptr := v_0.Args[0] 9536 idx := v_0.Args[1] 9537 mem := v.Args[1] 9538 if !(ptr.Op != OpSB) { 9539 break 9540 } 9541 v.reset(OpAMD64MOVQloadidx1) 9542 v.AuxInt = off 9543 v.Aux = sym 9544 v.AddArg(ptr) 9545 v.AddArg(idx) 9546 v.AddArg(mem) 9547 return true 9548 } 9549 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 9550 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 9551 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9552 for { 9553 off1 := v.AuxInt 9554 sym1 := v.Aux 9555 _ = v.Args[1] 9556 v_0 := v.Args[0] 9557 if v_0.Op != OpAMD64LEAL { 9558 break 9559 } 9560 off2 := v_0.AuxInt 9561 sym2 := v_0.Aux 9562 base := v_0.Args[0] 9563 mem := v.Args[1] 9564 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 9565 break 9566 } 9567 v.reset(OpAMD64MOVQload) 9568 v.AuxInt = off1 + off2 9569 v.Aux = mergeSym(sym1, sym2) 9570 v.AddArg(base) 9571 v.AddArg(mem) 9572 return true 9573 } 9574 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 9575 // cond: is32Bit(off1+off2) 9576 // result: (MOVQload [off1+off2] {sym} ptr mem) 9577 for { 9578 off1 := v.AuxInt 9579 sym := v.Aux 9580 _ = v.Args[1] 9581 v_0 := v.Args[0] 9582 if v_0.Op != OpAMD64ADDLconst { 9583 break 9584 } 9585 off2 := v_0.AuxInt 9586 ptr := v_0.Args[0] 9587 mem := v.Args[1] 9588 if !(is32Bit(off1 + off2)) { 9589 break 9590 } 9591 v.reset(OpAMD64MOVQload) 9592 v.AuxInt = off1 + off2 9593 v.Aux = sym 9594 v.AddArg(ptr) 9595 v.AddArg(mem) 9596 return true 9597 } 9598 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) 9599 // cond: 9600 // result: (MOVQf2i val) 9601 for { 9602 off := v.AuxInt 9603 sym := v.Aux 9604 _ = v.Args[1] 9605 ptr := 
v.Args[0] 9606 v_1 := v.Args[1] 9607 if v_1.Op != OpAMD64MOVSDstore { 9608 break 9609 } 9610 if v_1.AuxInt != off { 9611 break 9612 } 9613 if v_1.Aux != sym { 9614 break 9615 } 9616 _ = v_1.Args[2] 9617 if ptr != v_1.Args[0] { 9618 break 9619 } 9620 val := v_1.Args[1] 9621 v.reset(OpAMD64MOVQf2i) 9622 v.AddArg(val) 9623 return true 9624 } 9625 return false 9626 } 9627 func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { 9628 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 9629 // cond: 9630 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 9631 for { 9632 c := v.AuxInt 9633 sym := v.Aux 9634 _ = v.Args[2] 9635 ptr := v.Args[0] 9636 v_1 := v.Args[1] 9637 if v_1.Op != OpAMD64SHLQconst { 9638 break 9639 } 9640 if v_1.AuxInt != 3 { 9641 break 9642 } 9643 idx := v_1.Args[0] 9644 mem := v.Args[2] 9645 v.reset(OpAMD64MOVQloadidx8) 9646 v.AuxInt = c 9647 v.Aux = sym 9648 v.AddArg(ptr) 9649 v.AddArg(idx) 9650 v.AddArg(mem) 9651 return true 9652 } 9653 // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 9654 // cond: 9655 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 9656 for { 9657 c := v.AuxInt 9658 sym := v.Aux 9659 _ = v.Args[2] 9660 v_0 := v.Args[0] 9661 if v_0.Op != OpAMD64SHLQconst { 9662 break 9663 } 9664 if v_0.AuxInt != 3 { 9665 break 9666 } 9667 idx := v_0.Args[0] 9668 ptr := v.Args[1] 9669 mem := v.Args[2] 9670 v.reset(OpAMD64MOVQloadidx8) 9671 v.AuxInt = c 9672 v.Aux = sym 9673 v.AddArg(ptr) 9674 v.AddArg(idx) 9675 v.AddArg(mem) 9676 return true 9677 } 9678 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9679 // cond: is32Bit(c+d) 9680 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9681 for { 9682 c := v.AuxInt 9683 sym := v.Aux 9684 _ = v.Args[2] 9685 v_0 := v.Args[0] 9686 if v_0.Op != OpAMD64ADDQconst { 9687 break 9688 } 9689 d := v_0.AuxInt 9690 ptr := v_0.Args[0] 9691 idx := v.Args[1] 9692 mem := v.Args[2] 9693 if !(is32Bit(c + d)) { 9694 break 9695 } 9696 v.reset(OpAMD64MOVQloadidx1) 9697 v.AuxInt = c + d 9698 v.Aux = sym 9699 v.AddArg(ptr) 9700 v.AddArg(idx) 9701 v.AddArg(mem) 9702 return true 9703 } 9704 // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 9705 // cond: is32Bit(c+d) 9706 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9707 for { 9708 c := v.AuxInt 9709 sym := v.Aux 9710 _ = v.Args[2] 9711 idx := v.Args[0] 9712 v_1 := v.Args[1] 9713 if v_1.Op != OpAMD64ADDQconst { 9714 break 9715 } 9716 d := v_1.AuxInt 9717 ptr := v_1.Args[0] 9718 mem := v.Args[2] 9719 if !(is32Bit(c + d)) { 9720 break 9721 } 9722 v.reset(OpAMD64MOVQloadidx1) 9723 v.AuxInt = c + d 9724 v.Aux = sym 9725 v.AddArg(ptr) 9726 v.AddArg(idx) 9727 v.AddArg(mem) 9728 return true 9729 } 9730 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9731 // cond: is32Bit(c+d) 9732 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9733 for { 9734 c := v.AuxInt 9735 sym := v.Aux 9736 _ = v.Args[2] 9737 ptr := v.Args[0] 9738 v_1 := v.Args[1] 9739 if v_1.Op != OpAMD64ADDQconst { 9740 break 9741 } 9742 d := v_1.AuxInt 9743 idx := v_1.Args[0] 9744 mem := v.Args[2] 9745 if !(is32Bit(c + d)) { 9746 break 9747 } 9748 v.reset(OpAMD64MOVQloadidx1) 9749 v.AuxInt = c + d 9750 v.Aux = sym 9751 v.AddArg(ptr) 9752 v.AddArg(idx) 9753 v.AddArg(mem) 9754 return true 9755 } 9756 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 9757 // cond: is32Bit(c+d) 9758 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9759 for { 9760 c := v.AuxInt 9761 sym := v.Aux 9762 _ = v.Args[2] 9763 v_0 := v.Args[0] 9764 if v_0.Op != OpAMD64ADDQconst { 9765 break 
9766 } 9767 d := v_0.AuxInt 9768 idx := v_0.Args[0] 9769 ptr := v.Args[1] 9770 mem := v.Args[2] 9771 if !(is32Bit(c + d)) { 9772 break 9773 } 9774 v.reset(OpAMD64MOVQloadidx1) 9775 v.AuxInt = c + d 9776 v.Aux = sym 9777 v.AddArg(ptr) 9778 v.AddArg(idx) 9779 v.AddArg(mem) 9780 return true 9781 } 9782 return false 9783 } 9784 func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { 9785 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 9786 // cond: is32Bit(c+d) 9787 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 9788 for { 9789 c := v.AuxInt 9790 sym := v.Aux 9791 _ = v.Args[2] 9792 v_0 := v.Args[0] 9793 if v_0.Op != OpAMD64ADDQconst { 9794 break 9795 } 9796 d := v_0.AuxInt 9797 ptr := v_0.Args[0] 9798 idx := v.Args[1] 9799 mem := v.Args[2] 9800 if !(is32Bit(c + d)) { 9801 break 9802 } 9803 v.reset(OpAMD64MOVQloadidx8) 9804 v.AuxInt = c + d 9805 v.Aux = sym 9806 v.AddArg(ptr) 9807 v.AddArg(idx) 9808 v.AddArg(mem) 9809 return true 9810 } 9811 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 9812 // cond: is32Bit(c+8*d) 9813 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 9814 for { 9815 c := v.AuxInt 9816 sym := v.Aux 9817 _ = v.Args[2] 9818 ptr := v.Args[0] 9819 v_1 := v.Args[1] 9820 if v_1.Op != OpAMD64ADDQconst { 9821 break 9822 } 9823 d := v_1.AuxInt 9824 idx := v_1.Args[0] 9825 mem := v.Args[2] 9826 if !(is32Bit(c + 8*d)) { 9827 break 9828 } 9829 v.reset(OpAMD64MOVQloadidx8) 9830 v.AuxInt = c + 8*d 9831 v.Aux = sym 9832 v.AddArg(ptr) 9833 v.AddArg(idx) 9834 v.AddArg(mem) 9835 return true 9836 } 9837 return false 9838 } 9839 func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { 9840 b := v.Block 9841 _ = b 9842 config := b.Func.Config 9843 _ = config 9844 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9845 // cond: is32Bit(off1+off2) 9846 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 9847 for { 9848 off1 := v.AuxInt 9849 sym := v.Aux 9850 _ = v.Args[2] 9851 v_0 := v.Args[0] 9852 if v_0.Op != OpAMD64ADDQconst { 9853 break 9854 } 9855 off2 := v_0.AuxInt 9856 ptr := v_0.Args[0] 9857 val := v.Args[1] 9858 mem := v.Args[2] 9859 if !(is32Bit(off1 + off2)) { 9860 break 9861 } 9862 v.reset(OpAMD64MOVQstore) 9863 v.AuxInt = off1 + off2 9864 v.Aux = sym 9865 v.AddArg(ptr) 9866 v.AddArg(val) 9867 v.AddArg(mem) 9868 return true 9869 } 9870 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 9871 // cond: validValAndOff(c,off) 9872 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 9873 for { 9874 off := v.AuxInt 9875 sym := v.Aux 9876 _ = v.Args[2] 9877 ptr := v.Args[0] 9878 v_1 := v.Args[1] 9879 if v_1.Op != OpAMD64MOVQconst { 9880 break 9881 } 9882 c := v_1.AuxInt 9883 mem := v.Args[2] 9884 if !(validValAndOff(c, off)) { 9885 break 9886 } 9887 v.reset(OpAMD64MOVQstoreconst) 9888 v.AuxInt = makeValAndOff(c, off) 9889 v.Aux = sym 9890 v.AddArg(ptr) 9891 v.AddArg(mem) 9892 return true 9893 } 9894 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9895 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9896 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9897 for { 9898 off1 := v.AuxInt 9899 sym1 := v.Aux 9900 _ = v.Args[2] 9901 v_0 := v.Args[0] 9902 if v_0.Op != OpAMD64LEAQ { 9903 break 9904 } 9905 off2 := v_0.AuxInt 9906 sym2 := v_0.Aux 9907 base := v_0.Args[0] 9908 val := v.Args[1] 9909 mem := v.Args[2] 9910 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9911 break 9912 } 9913 v.reset(OpAMD64MOVQstore) 9914 v.AuxInt = off1 + off2 9915 v.Aux = mergeSym(sym1, 
sym2) 9916 v.AddArg(base) 9917 v.AddArg(val) 9918 v.AddArg(mem) 9919 return true 9920 } 9921 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9922 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9923 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9924 for { 9925 off1 := v.AuxInt 9926 sym1 := v.Aux 9927 _ = v.Args[2] 9928 v_0 := v.Args[0] 9929 if v_0.Op != OpAMD64LEAQ1 { 9930 break 9931 } 9932 off2 := v_0.AuxInt 9933 sym2 := v_0.Aux 9934 _ = v_0.Args[1] 9935 ptr := v_0.Args[0] 9936 idx := v_0.Args[1] 9937 val := v.Args[1] 9938 mem := v.Args[2] 9939 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9940 break 9941 } 9942 v.reset(OpAMD64MOVQstoreidx1) 9943 v.AuxInt = off1 + off2 9944 v.Aux = mergeSym(sym1, sym2) 9945 v.AddArg(ptr) 9946 v.AddArg(idx) 9947 v.AddArg(val) 9948 v.AddArg(mem) 9949 return true 9950 } 9951 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 9952 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9953 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9954 for { 9955 off1 := v.AuxInt 9956 sym1 := v.Aux 9957 _ = v.Args[2] 9958 v_0 := v.Args[0] 9959 if v_0.Op != OpAMD64LEAQ8 { 9960 break 9961 } 9962 off2 := v_0.AuxInt 9963 sym2 := v_0.Aux 9964 _ = v_0.Args[1] 9965 ptr := v_0.Args[0] 9966 idx := v_0.Args[1] 9967 val := v.Args[1] 9968 mem := v.Args[2] 9969 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9970 break 9971 } 9972 v.reset(OpAMD64MOVQstoreidx8) 9973 v.AuxInt = off1 + off2 9974 v.Aux = mergeSym(sym1, sym2) 9975 v.AddArg(ptr) 9976 v.AddArg(idx) 9977 v.AddArg(val) 9978 v.AddArg(mem) 9979 return true 9980 } 9981 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 9982 // cond: ptr.Op != OpSB 9983 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 9984 for { 9985 off := v.AuxInt 9986 sym := v.Aux 9987 _ = v.Args[2] 9988 v_0 := v.Args[0] 9989 if v_0.Op != OpAMD64ADDQ { 9990 break 9991 } 9992 _ = v_0.Args[1] 9993 ptr := v_0.Args[0] 9994 idx := v_0.Args[1] 9995 val := v.Args[1] 9996 mem := v.Args[2] 9997 if !(ptr.Op != OpSB) { 9998 break 9999 } 10000 v.reset(OpAMD64MOVQstoreidx1) 10001 v.AuxInt = off 10002 v.Aux = sym 10003 v.AddArg(ptr) 10004 v.AddArg(idx) 10005 v.AddArg(val) 10006 v.AddArg(mem) 10007 return true 10008 } 10009 // match: (MOVQstore [i] {s} p x1:(MOVQload [j] {s2} p2 mem) mem2:(MOVQstore [i-8] {s} p x2:(MOVQload [j-8] {s2} p2 mem) mem)) 10010 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2) 10011 // result: (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem) 10012 for { 10013 i := v.AuxInt 10014 s := v.Aux 10015 _ = v.Args[2] 10016 p := v.Args[0] 10017 x1 := v.Args[1] 10018 if x1.Op != OpAMD64MOVQload { 10019 break 10020 } 10021 j := x1.AuxInt 10022 s2 := x1.Aux 10023 _ = x1.Args[1] 10024 p2 := x1.Args[0] 10025 mem := x1.Args[1] 10026 mem2 := v.Args[2] 10027 if mem2.Op != OpAMD64MOVQstore { 10028 break 10029 } 10030 if mem2.AuxInt != i-8 { 10031 break 10032 } 10033 if mem2.Aux != s { 10034 break 10035 } 10036 _ = mem2.Args[2] 10037 if p != mem2.Args[0] { 10038 break 10039 } 10040 x2 := mem2.Args[1] 10041 if x2.Op != OpAMD64MOVQload { 10042 break 10043 } 10044 if x2.AuxInt != j-8 { 10045 break 10046 } 10047 if x2.Aux != s2 { 10048 break 10049 } 10050 _ = x2.Args[1] 10051 if p2 != x2.Args[0] { 10052 break 10053 } 10054 if mem != x2.Args[1] { 10055 break 10056 } 10057 if mem != mem2.Args[2] { 10058 break 10059 } 10060 if !(x1.Uses == 1 && x2.Uses == 1 
&& mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2)) { 10061 break 10062 } 10063 v.reset(OpAMD64MOVOstore) 10064 v.AuxInt = i - 8 10065 v.Aux = s 10066 v.AddArg(p) 10067 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 10068 v0.AuxInt = j - 8 10069 v0.Aux = s2 10070 v0.AddArg(p2) 10071 v0.AddArg(mem) 10072 v.AddArg(v0) 10073 v.AddArg(mem) 10074 return true 10075 } 10076 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 10077 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 10078 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10079 for { 10080 off1 := v.AuxInt 10081 sym1 := v.Aux 10082 _ = v.Args[2] 10083 v_0 := v.Args[0] 10084 if v_0.Op != OpAMD64LEAL { 10085 break 10086 } 10087 off2 := v_0.AuxInt 10088 sym2 := v_0.Aux 10089 base := v_0.Args[0] 10090 val := v.Args[1] 10091 mem := v.Args[2] 10092 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 10093 break 10094 } 10095 v.reset(OpAMD64MOVQstore) 10096 v.AuxInt = off1 + off2 10097 v.Aux = mergeSym(sym1, sym2) 10098 v.AddArg(base) 10099 v.AddArg(val) 10100 v.AddArg(mem) 10101 return true 10102 } 10103 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 10104 // cond: is32Bit(off1+off2) 10105 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 10106 for { 10107 off1 := v.AuxInt 10108 sym := v.Aux 10109 _ = v.Args[2] 10110 v_0 := v.Args[0] 10111 if v_0.Op != OpAMD64ADDLconst { 10112 break 10113 } 10114 off2 := v_0.AuxInt 10115 ptr := v_0.Args[0] 10116 val := v.Args[1] 10117 mem := v.Args[2] 10118 if !(is32Bit(off1 + off2)) { 10119 break 10120 } 10121 v.reset(OpAMD64MOVQstore) 10122 v.AuxInt = off1 + off2 10123 v.Aux = sym 10124 v.AddArg(ptr) 10125 v.AddArg(val) 10126 v.AddArg(mem) 10127 return true 10128 } 10129 // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 10130 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) 10131 // result: (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem) 10132 for { 10133 off := v.AuxInt 10134 sym := v.Aux 10135 _ = v.Args[2] 10136 ptr := v.Args[0] 10137 a := v.Args[1] 10138 if a.Op != OpAMD64ADDQconst { 10139 break 10140 } 10141 c := a.AuxInt 10142 l := a.Args[0] 10143 if l.Op != OpAMD64MOVQload { 10144 break 10145 } 10146 if l.AuxInt != off { 10147 break 10148 } 10149 if l.Aux != sym { 10150 break 10151 } 10152 _ = l.Args[1] 10153 ptr2 := l.Args[0] 10154 mem := l.Args[1] 10155 if mem != v.Args[2] { 10156 break 10157 } 10158 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { 10159 break 10160 } 10161 v.reset(OpAMD64ADDQconstmem) 10162 v.AuxInt = makeValAndOff(c, off) 10163 v.Aux = sym 10164 v.AddArg(ptr) 10165 v.AddArg(mem) 10166 return true 10167 } 10168 return false 10169 } 10170 func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { 10171 // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) 10172 // cond: 10173 // result: (MOVSDstore [off] {sym} ptr val mem) 10174 for { 10175 off := v.AuxInt 10176 sym := v.Aux 10177 _ = v.Args[2] 10178 ptr := v.Args[0] 10179 v_1 := v.Args[1] 10180 if v_1.Op != OpAMD64MOVQf2i { 10181 break 10182 } 10183 val := v_1.Args[0] 10184 mem := v.Args[2] 10185 v.reset(OpAMD64MOVSDstore) 10186 v.AuxInt = off 10187 v.Aux = sym 10188 v.AddArg(ptr) 10189 v.AddArg(val) 10190 v.AddArg(mem) 10191 return true 10192 } 10193 return false 10194 } 10195 func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { 10196 b := v.Block 10197 _ = b 10198 config := 
b.Func.Config 10199 _ = config 10200 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 10201 // cond: ValAndOff(sc).canAdd(off) 10202 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 10203 for { 10204 sc := v.AuxInt 10205 s := v.Aux 10206 _ = v.Args[1] 10207 v_0 := v.Args[0] 10208 if v_0.Op != OpAMD64ADDQconst { 10209 break 10210 } 10211 off := v_0.AuxInt 10212 ptr := v_0.Args[0] 10213 mem := v.Args[1] 10214 if !(ValAndOff(sc).canAdd(off)) { 10215 break 10216 } 10217 v.reset(OpAMD64MOVQstoreconst) 10218 v.AuxInt = ValAndOff(sc).add(off) 10219 v.Aux = s 10220 v.AddArg(ptr) 10221 v.AddArg(mem) 10222 return true 10223 } 10224 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 10225 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 10226 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 10227 for { 10228 sc := v.AuxInt 10229 sym1 := v.Aux 10230 _ = v.Args[1] 10231 v_0 := v.Args[0] 10232 if v_0.Op != OpAMD64LEAQ { 10233 break 10234 } 10235 off := v_0.AuxInt 10236 sym2 := v_0.Aux 10237 ptr := v_0.Args[0] 10238 mem := v.Args[1] 10239 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 10240 break 10241 } 10242 v.reset(OpAMD64MOVQstoreconst) 10243 v.AuxInt = ValAndOff(sc).add(off) 10244 v.Aux = mergeSym(sym1, sym2) 10245 v.AddArg(ptr) 10246 v.AddArg(mem) 10247 return true 10248 } 10249 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 10250 // cond: canMergeSym(sym1, sym2) 10251 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 10252 for { 10253 x := v.AuxInt 10254 sym1 := v.Aux 10255 _ = v.Args[1] 10256 v_0 := v.Args[0] 10257 if v_0.Op != OpAMD64LEAQ1 { 10258 break 10259 } 10260 off := v_0.AuxInt 10261 sym2 := v_0.Aux 10262 _ = v_0.Args[1] 10263 ptr := v_0.Args[0] 10264 idx := v_0.Args[1] 10265 mem := v.Args[1] 10266 if !(canMergeSym(sym1, sym2)) { 10267 break 10268 } 10269 v.reset(OpAMD64MOVQstoreconstidx1) 10270 v.AuxInt = ValAndOff(x).add(off) 10271 v.Aux = mergeSym(sym1, sym2) 10272 v.AddArg(ptr) 10273 v.AddArg(idx) 10274 v.AddArg(mem) 10275 return true 10276 } 10277 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 10278 // cond: canMergeSym(sym1, sym2) 10279 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 10280 for { 10281 x := v.AuxInt 10282 sym1 := v.Aux 10283 _ = v.Args[1] 10284 v_0 := v.Args[0] 10285 if v_0.Op != OpAMD64LEAQ8 { 10286 break 10287 } 10288 off := v_0.AuxInt 10289 sym2 := v_0.Aux 10290 _ = v_0.Args[1] 10291 ptr := v_0.Args[0] 10292 idx := v_0.Args[1] 10293 mem := v.Args[1] 10294 if !(canMergeSym(sym1, sym2)) { 10295 break 10296 } 10297 v.reset(OpAMD64MOVQstoreconstidx8) 10298 v.AuxInt = ValAndOff(x).add(off) 10299 v.Aux = mergeSym(sym1, sym2) 10300 v.AddArg(ptr) 10301 v.AddArg(idx) 10302 v.AddArg(mem) 10303 return true 10304 } 10305 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 10306 // cond: 10307 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 10308 for { 10309 x := v.AuxInt 10310 sym := v.Aux 10311 _ = v.Args[1] 10312 v_0 := v.Args[0] 10313 if v_0.Op != OpAMD64ADDQ { 10314 break 10315 } 10316 _ = v_0.Args[1] 10317 ptr := v_0.Args[0] 10318 idx := v_0.Args[1] 10319 mem := v.Args[1] 10320 v.reset(OpAMD64MOVQstoreconstidx1) 10321 v.AuxInt = x 10322 v.Aux = sym 10323 v.AddArg(ptr) 10324 v.AddArg(idx) 10325 v.AddArg(mem) 10326 return true 10327 } 10328 // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) 10329 // cond: 
config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x) 10330 // result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem) 10331 for { 10332 c := v.AuxInt 10333 s := v.Aux 10334 _ = v.Args[1] 10335 p := v.Args[0] 10336 x := v.Args[1] 10337 if x.Op != OpAMD64MOVQstoreconst { 10338 break 10339 } 10340 c2 := x.AuxInt 10341 if x.Aux != s { 10342 break 10343 } 10344 _ = x.Args[1] 10345 if p != x.Args[0] { 10346 break 10347 } 10348 mem := x.Args[1] 10349 if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) { 10350 break 10351 } 10352 v.reset(OpAMD64MOVOstore) 10353 v.AuxInt = ValAndOff(c2).Off() 10354 v.Aux = s 10355 v.AddArg(p) 10356 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 10357 v0.AuxInt = 0 10358 v.AddArg(v0) 10359 v.AddArg(mem) 10360 return true 10361 } 10362 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 10363 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 10364 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 10365 for { 10366 sc := v.AuxInt 10367 sym1 := v.Aux 10368 _ = v.Args[1] 10369 v_0 := v.Args[0] 10370 if v_0.Op != OpAMD64LEAL { 10371 break 10372 } 10373 off := v_0.AuxInt 10374 sym2 := v_0.Aux 10375 ptr := v_0.Args[0] 10376 mem := v.Args[1] 10377 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 10378 break 10379 } 10380 v.reset(OpAMD64MOVQstoreconst) 10381 v.AuxInt = ValAndOff(sc).add(off) 10382 v.Aux = mergeSym(sym1, sym2) 10383 v.AddArg(ptr) 10384 v.AddArg(mem) 10385 return true 10386 } 10387 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 10388 // cond: ValAndOff(sc).canAdd(off) 10389 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 10390 for { 10391 sc := v.AuxInt 10392 s := v.Aux 10393 _ = v.Args[1] 10394 v_0 := v.Args[0] 10395 if v_0.Op != OpAMD64ADDLconst { 10396 break 10397 } 10398 off := v_0.AuxInt 10399 ptr := v_0.Args[0] 10400 mem := v.Args[1] 10401 if !(ValAndOff(sc).canAdd(off)) { 10402 break 10403 } 10404 v.reset(OpAMD64MOVQstoreconst) 10405 v.AuxInt = ValAndOff(sc).add(off) 10406 v.Aux = s 10407 v.AddArg(ptr) 10408 v.AddArg(mem) 10409 return true 10410 } 10411 return false 10412 } 10413 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { 10414 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 10415 // cond: 10416 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 10417 for { 10418 c := v.AuxInt 10419 sym := v.Aux 10420 _ = v.Args[2] 10421 ptr := v.Args[0] 10422 v_1 := v.Args[1] 10423 if v_1.Op != OpAMD64SHLQconst { 10424 break 10425 } 10426 if v_1.AuxInt != 3 { 10427 break 10428 } 10429 idx := v_1.Args[0] 10430 mem := v.Args[2] 10431 v.reset(OpAMD64MOVQstoreconstidx8) 10432 v.AuxInt = c 10433 v.Aux = sym 10434 v.AddArg(ptr) 10435 v.AddArg(idx) 10436 v.AddArg(mem) 10437 return true 10438 } 10439 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 10440 // cond: ValAndOff(x).canAdd(c) 10441 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10442 for { 10443 x := v.AuxInt 10444 sym := v.Aux 10445 _ = v.Args[2] 10446 v_0 := v.Args[0] 10447 if v_0.Op != OpAMD64ADDQconst { 10448 break 10449 } 10450 c := v_0.AuxInt 10451 ptr := v_0.Args[0] 10452 idx := v.Args[1] 10453 mem := v.Args[2] 10454 if !(ValAndOff(x).canAdd(c)) { 10455 break 10456 } 10457 
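// ValAndOff packs a constant value and a memory offset into the single
// AuxInt field; the canAdd guard just above checks that the offset half
// can absorb c without overflowing, so the add below cannot corrupt the
// value half. Sketch with illustrative numbers: for x = makeValAndOff(5, 16),
// ValAndOff(x).add(8) keeps Val() == 5 while Off() becomes 24.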
v.reset(OpAMD64MOVQstoreconstidx1) 10458 v.AuxInt = ValAndOff(x).add(c) 10459 v.Aux = sym 10460 v.AddArg(ptr) 10461 v.AddArg(idx) 10462 v.AddArg(mem) 10463 return true 10464 } 10465 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 10466 // cond: ValAndOff(x).canAdd(c) 10467 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10468 for { 10469 x := v.AuxInt 10470 sym := v.Aux 10471 _ = v.Args[2] 10472 ptr := v.Args[0] 10473 v_1 := v.Args[1] 10474 if v_1.Op != OpAMD64ADDQconst { 10475 break 10476 } 10477 c := v_1.AuxInt 10478 idx := v_1.Args[0] 10479 mem := v.Args[2] 10480 if !(ValAndOff(x).canAdd(c)) { 10481 break 10482 } 10483 v.reset(OpAMD64MOVQstoreconstidx1) 10484 v.AuxInt = ValAndOff(x).add(c) 10485 v.Aux = sym 10486 v.AddArg(ptr) 10487 v.AddArg(idx) 10488 v.AddArg(mem) 10489 return true 10490 } 10491 return false 10492 } 10493 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { 10494 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 10495 // cond: ValAndOff(x).canAdd(c) 10496 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10497 for { 10498 x := v.AuxInt 10499 sym := v.Aux 10500 _ = v.Args[2] 10501 v_0 := v.Args[0] 10502 if v_0.Op != OpAMD64ADDQconst { 10503 break 10504 } 10505 c := v_0.AuxInt 10506 ptr := v_0.Args[0] 10507 idx := v.Args[1] 10508 mem := v.Args[2] 10509 if !(ValAndOff(x).canAdd(c)) { 10510 break 10511 } 10512 v.reset(OpAMD64MOVQstoreconstidx8) 10513 v.AuxInt = ValAndOff(x).add(c) 10514 v.Aux = sym 10515 v.AddArg(ptr) 10516 v.AddArg(idx) 10517 v.AddArg(mem) 10518 return true 10519 } 10520 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 10521 // cond: ValAndOff(x).canAdd(8*c) 10522 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 10523 for { 10524 x := v.AuxInt 10525 sym := v.Aux 10526 _ = v.Args[2] 10527 ptr := v.Args[0] 10528 v_1 := v.Args[1] 10529 if v_1.Op != OpAMD64ADDQconst { 10530 break 10531 } 10532 c := v_1.AuxInt 10533 idx := v_1.Args[0] 10534 mem := v.Args[2] 10535 if !(ValAndOff(x).canAdd(8 * c)) { 10536 break 10537 } 10538 v.reset(OpAMD64MOVQstoreconstidx8) 10539 v.AuxInt = ValAndOff(x).add(8 * c) 10540 v.Aux = sym 10541 v.AddArg(ptr) 10542 v.AddArg(idx) 10543 v.AddArg(mem) 10544 return true 10545 } 10546 return false 10547 } 10548 func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { 10549 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 10550 // cond: 10551 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 10552 for { 10553 c := v.AuxInt 10554 sym := v.Aux 10555 _ = v.Args[3] 10556 ptr := v.Args[0] 10557 v_1 := v.Args[1] 10558 if v_1.Op != OpAMD64SHLQconst { 10559 break 10560 } 10561 if v_1.AuxInt != 3 { 10562 break 10563 } 10564 idx := v_1.Args[0] 10565 val := v.Args[2] 10566 mem := v.Args[3] 10567 v.reset(OpAMD64MOVQstoreidx8) 10568 v.AuxInt = c 10569 v.Aux = sym 10570 v.AddArg(ptr) 10571 v.AddArg(idx) 10572 v.AddArg(val) 10573 v.AddArg(mem) 10574 return true 10575 } 10576 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10577 // cond: is32Bit(c+d) 10578 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 10579 for { 10580 c := v.AuxInt 10581 sym := v.Aux 10582 _ = v.Args[3] 10583 v_0 := v.Args[0] 10584 if v_0.Op != OpAMD64ADDQconst { 10585 break 10586 } 10587 d := v_0.AuxInt 10588 ptr := v_0.Args[0] 10589 idx := v.Args[1] 10590 val := v.Args[2] 10591 mem := v.Args[3] 10592 if !(is32Bit(c + d)) { 10593 break 10594 } 10595 v.reset(OpAMD64MOVQstoreidx1) 10596 
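// The pointer-side ADDQconst folds into the displacement at scale 1; the
// is32Bit(c+d) guard above keeps the combined offset encodable in the
// instruction's signed 32-bit displacement field. For example, c=8 and
// d=16 collapse to a single store at [24](ptr)(idx*1).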
v.AuxInt = c + d 10597 v.Aux = sym 10598 v.AddArg(ptr) 10599 v.AddArg(idx) 10600 v.AddArg(val) 10601 v.AddArg(mem) 10602 return true 10603 } 10604 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10605 // cond: is32Bit(c+d) 10606 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 10607 for { 10608 c := v.AuxInt 10609 sym := v.Aux 10610 _ = v.Args[3] 10611 ptr := v.Args[0] 10612 v_1 := v.Args[1] 10613 if v_1.Op != OpAMD64ADDQconst { 10614 break 10615 } 10616 d := v_1.AuxInt 10617 idx := v_1.Args[0] 10618 val := v.Args[2] 10619 mem := v.Args[3] 10620 if !(is32Bit(c + d)) { 10621 break 10622 } 10623 v.reset(OpAMD64MOVQstoreidx1) 10624 v.AuxInt = c + d 10625 v.Aux = sym 10626 v.AddArg(ptr) 10627 v.AddArg(idx) 10628 v.AddArg(val) 10629 v.AddArg(mem) 10630 return true 10631 } 10632 return false 10633 } 10634 func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { 10635 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10636 // cond: is32Bit(c+d) 10637 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 10638 for { 10639 c := v.AuxInt 10640 sym := v.Aux 10641 _ = v.Args[3] 10642 v_0 := v.Args[0] 10643 if v_0.Op != OpAMD64ADDQconst { 10644 break 10645 } 10646 d := v_0.AuxInt 10647 ptr := v_0.Args[0] 10648 idx := v.Args[1] 10649 val := v.Args[2] 10650 mem := v.Args[3] 10651 if !(is32Bit(c + d)) { 10652 break 10653 } 10654 v.reset(OpAMD64MOVQstoreidx8) 10655 v.AuxInt = c + d 10656 v.Aux = sym 10657 v.AddArg(ptr) 10658 v.AddArg(idx) 10659 v.AddArg(val) 10660 v.AddArg(mem) 10661 return true 10662 } 10663 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10664 // cond: is32Bit(c+8*d) 10665 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 10666 for { 10667 c := v.AuxInt 10668 sym := v.Aux 10669 _ = v.Args[3] 10670 ptr := v.Args[0] 10671 v_1 := v.Args[1] 10672 if v_1.Op != OpAMD64ADDQconst { 10673 break 10674 } 10675 d := v_1.AuxInt 10676 idx := v_1.Args[0] 10677 val := v.Args[2] 10678 mem := v.Args[3] 10679 if !(is32Bit(c + 8*d)) { 10680 break 10681 } 10682 v.reset(OpAMD64MOVQstoreidx8) 10683 v.AuxInt = c + 8*d 10684 v.Aux = sym 10685 v.AddArg(ptr) 10686 v.AddArg(idx) 10687 v.AddArg(val) 10688 v.AddArg(mem) 10689 return true 10690 } 10691 return false 10692 } 10693 func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { 10694 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 10695 // cond: is32Bit(off1+off2) 10696 // result: (MOVSDload [off1+off2] {sym} ptr mem) 10697 for { 10698 off1 := v.AuxInt 10699 sym := v.Aux 10700 _ = v.Args[1] 10701 v_0 := v.Args[0] 10702 if v_0.Op != OpAMD64ADDQconst { 10703 break 10704 } 10705 off2 := v_0.AuxInt 10706 ptr := v_0.Args[0] 10707 mem := v.Args[1] 10708 if !(is32Bit(off1 + off2)) { 10709 break 10710 } 10711 v.reset(OpAMD64MOVSDload) 10712 v.AuxInt = off1 + off2 10713 v.Aux = sym 10714 v.AddArg(ptr) 10715 v.AddArg(mem) 10716 return true 10717 } 10718 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10719 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10720 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10721 for { 10722 off1 := v.AuxInt 10723 sym1 := v.Aux 10724 _ = v.Args[1] 10725 v_0 := v.Args[0] 10726 if v_0.Op != OpAMD64LEAQ { 10727 break 10728 } 10729 off2 := v_0.AuxInt 10730 sym2 := v_0.Aux 10731 base := v_0.Args[0] 10732 mem := v.Args[1] 10733 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10734 break 10735 } 10736 v.reset(OpAMD64MOVSDload) 10737 v.AuxInt = off1 + off2 10738 v.Aux = mergeSym(sym1, sym2) 10739 
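// canMergeSym (checked above) admits the merge only when at least one of
// the two symbols is nil, so mergeSym loses no information; the LEAQ's
// base and offset are absorbed directly into the load.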
v.AddArg(base) 10740 v.AddArg(mem) 10741 return true 10742 } 10743 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 10744 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10745 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10746 for { 10747 off1 := v.AuxInt 10748 sym1 := v.Aux 10749 _ = v.Args[1] 10750 v_0 := v.Args[0] 10751 if v_0.Op != OpAMD64LEAQ1 { 10752 break 10753 } 10754 off2 := v_0.AuxInt 10755 sym2 := v_0.Aux 10756 _ = v_0.Args[1] 10757 ptr := v_0.Args[0] 10758 idx := v_0.Args[1] 10759 mem := v.Args[1] 10760 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10761 break 10762 } 10763 v.reset(OpAMD64MOVSDloadidx1) 10764 v.AuxInt = off1 + off2 10765 v.Aux = mergeSym(sym1, sym2) 10766 v.AddArg(ptr) 10767 v.AddArg(idx) 10768 v.AddArg(mem) 10769 return true 10770 } 10771 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 10772 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10773 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10774 for { 10775 off1 := v.AuxInt 10776 sym1 := v.Aux 10777 _ = v.Args[1] 10778 v_0 := v.Args[0] 10779 if v_0.Op != OpAMD64LEAQ8 { 10780 break 10781 } 10782 off2 := v_0.AuxInt 10783 sym2 := v_0.Aux 10784 _ = v_0.Args[1] 10785 ptr := v_0.Args[0] 10786 idx := v_0.Args[1] 10787 mem := v.Args[1] 10788 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10789 break 10790 } 10791 v.reset(OpAMD64MOVSDloadidx8) 10792 v.AuxInt = off1 + off2 10793 v.Aux = mergeSym(sym1, sym2) 10794 v.AddArg(ptr) 10795 v.AddArg(idx) 10796 v.AddArg(mem) 10797 return true 10798 } 10799 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 10800 // cond: ptr.Op != OpSB 10801 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 10802 for { 10803 off := v.AuxInt 10804 sym := v.Aux 10805 _ = v.Args[1] 10806 v_0 := v.Args[0] 10807 if v_0.Op != OpAMD64ADDQ { 10808 break 10809 } 10810 _ = v_0.Args[1] 10811 ptr := v_0.Args[0] 10812 idx := v_0.Args[1] 10813 mem := v.Args[1] 10814 if !(ptr.Op != OpSB) { 10815 break 10816 } 10817 v.reset(OpAMD64MOVSDloadidx1) 10818 v.AuxInt = off 10819 v.Aux = sym 10820 v.AddArg(ptr) 10821 v.AddArg(idx) 10822 v.AddArg(mem) 10823 return true 10824 } 10825 // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) 10826 // cond: 10827 // result: (MOVQi2f val) 10828 for { 10829 off := v.AuxInt 10830 sym := v.Aux 10831 _ = v.Args[1] 10832 ptr := v.Args[0] 10833 v_1 := v.Args[1] 10834 if v_1.Op != OpAMD64MOVQstore { 10835 break 10836 } 10837 if v_1.AuxInt != off { 10838 break 10839 } 10840 if v_1.Aux != sym { 10841 break 10842 } 10843 _ = v_1.Args[2] 10844 if ptr != v_1.Args[0] { 10845 break 10846 } 10847 val := v_1.Args[1] 10848 v.reset(OpAMD64MOVQi2f) 10849 v.AddArg(val) 10850 return true 10851 } 10852 return false 10853 } 10854 func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { 10855 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 10856 // cond: 10857 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 10858 for { 10859 c := v.AuxInt 10860 sym := v.Aux 10861 _ = v.Args[2] 10862 ptr := v.Args[0] 10863 v_1 := v.Args[1] 10864 if v_1.Op != OpAMD64SHLQconst { 10865 break 10866 } 10867 if v_1.AuxInt != 3 { 10868 break 10869 } 10870 idx := v_1.Args[0] 10871 mem := v.Args[2] 10872 v.reset(OpAMD64MOVSDloadidx8) 10873 v.AuxInt = c 10874 v.Aux = sym 10875 v.AddArg(ptr) 10876 v.AddArg(idx) 10877 v.AddArg(mem) 10878 return true 10879 } 10880 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 10881 // cond: is32Bit(c+d) 10882 // 
result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 10883 for { 10884 c := v.AuxInt 10885 sym := v.Aux 10886 _ = v.Args[2] 10887 v_0 := v.Args[0] 10888 if v_0.Op != OpAMD64ADDQconst { 10889 break 10890 } 10891 d := v_0.AuxInt 10892 ptr := v_0.Args[0] 10893 idx := v.Args[1] 10894 mem := v.Args[2] 10895 if !(is32Bit(c + d)) { 10896 break 10897 } 10898 v.reset(OpAMD64MOVSDloadidx1) 10899 v.AuxInt = c + d 10900 v.Aux = sym 10901 v.AddArg(ptr) 10902 v.AddArg(idx) 10903 v.AddArg(mem) 10904 return true 10905 } 10906 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 10907 // cond: is32Bit(c+d) 10908 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 10909 for { 10910 c := v.AuxInt 10911 sym := v.Aux 10912 _ = v.Args[2] 10913 ptr := v.Args[0] 10914 v_1 := v.Args[1] 10915 if v_1.Op != OpAMD64ADDQconst { 10916 break 10917 } 10918 d := v_1.AuxInt 10919 idx := v_1.Args[0] 10920 mem := v.Args[2] 10921 if !(is32Bit(c + d)) { 10922 break 10923 } 10924 v.reset(OpAMD64MOVSDloadidx1) 10925 v.AuxInt = c + d 10926 v.Aux = sym 10927 v.AddArg(ptr) 10928 v.AddArg(idx) 10929 v.AddArg(mem) 10930 return true 10931 } 10932 return false 10933 } 10934 func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { 10935 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 10936 // cond: is32Bit(c+d) 10937 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 10938 for { 10939 c := v.AuxInt 10940 sym := v.Aux 10941 _ = v.Args[2] 10942 v_0 := v.Args[0] 10943 if v_0.Op != OpAMD64ADDQconst { 10944 break 10945 } 10946 d := v_0.AuxInt 10947 ptr := v_0.Args[0] 10948 idx := v.Args[1] 10949 mem := v.Args[2] 10950 if !(is32Bit(c + d)) { 10951 break 10952 } 10953 v.reset(OpAMD64MOVSDloadidx8) 10954 v.AuxInt = c + d 10955 v.Aux = sym 10956 v.AddArg(ptr) 10957 v.AddArg(idx) 10958 v.AddArg(mem) 10959 return true 10960 } 10961 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 10962 // cond: is32Bit(c+8*d) 10963 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 10964 for { 10965 c := v.AuxInt 10966 sym := v.Aux 10967 _ = v.Args[2] 10968 ptr := v.Args[0] 10969 v_1 := v.Args[1] 10970 if v_1.Op != OpAMD64ADDQconst { 10971 break 10972 } 10973 d := v_1.AuxInt 10974 idx := v_1.Args[0] 10975 mem := v.Args[2] 10976 if !(is32Bit(c + 8*d)) { 10977 break 10978 } 10979 v.reset(OpAMD64MOVSDloadidx8) 10980 v.AuxInt = c + 8*d 10981 v.Aux = sym 10982 v.AddArg(ptr) 10983 v.AddArg(idx) 10984 v.AddArg(mem) 10985 return true 10986 } 10987 return false 10988 } 10989 func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { 10990 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 10991 // cond: is32Bit(off1+off2) 10992 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 10993 for { 10994 off1 := v.AuxInt 10995 sym := v.Aux 10996 _ = v.Args[2] 10997 v_0 := v.Args[0] 10998 if v_0.Op != OpAMD64ADDQconst { 10999 break 11000 } 11001 off2 := v_0.AuxInt 11002 ptr := v_0.Args[0] 11003 val := v.Args[1] 11004 mem := v.Args[2] 11005 if !(is32Bit(off1 + off2)) { 11006 break 11007 } 11008 v.reset(OpAMD64MOVSDstore) 11009 v.AuxInt = off1 + off2 11010 v.Aux = sym 11011 v.AddArg(ptr) 11012 v.AddArg(val) 11013 v.AddArg(mem) 11014 return true 11015 } 11016 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11017 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11018 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11019 for { 11020 off1 := v.AuxInt 11021 sym1 := v.Aux 11022 _ = v.Args[2] 11023 v_0 := v.Args[0] 11024 if v_0.Op != OpAMD64LEAQ { 11025 break 11026 } 11027 off2 
:= v_0.AuxInt 11028 sym2 := v_0.Aux 11029 base := v_0.Args[0] 11030 val := v.Args[1] 11031 mem := v.Args[2] 11032 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11033 break 11034 } 11035 v.reset(OpAMD64MOVSDstore) 11036 v.AuxInt = off1 + off2 11037 v.Aux = mergeSym(sym1, sym2) 11038 v.AddArg(base) 11039 v.AddArg(val) 11040 v.AddArg(mem) 11041 return true 11042 } 11043 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 11044 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11045 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11046 for { 11047 off1 := v.AuxInt 11048 sym1 := v.Aux 11049 _ = v.Args[2] 11050 v_0 := v.Args[0] 11051 if v_0.Op != OpAMD64LEAQ1 { 11052 break 11053 } 11054 off2 := v_0.AuxInt 11055 sym2 := v_0.Aux 11056 _ = v_0.Args[1] 11057 ptr := v_0.Args[0] 11058 idx := v_0.Args[1] 11059 val := v.Args[1] 11060 mem := v.Args[2] 11061 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11062 break 11063 } 11064 v.reset(OpAMD64MOVSDstoreidx1) 11065 v.AuxInt = off1 + off2 11066 v.Aux = mergeSym(sym1, sym2) 11067 v.AddArg(ptr) 11068 v.AddArg(idx) 11069 v.AddArg(val) 11070 v.AddArg(mem) 11071 return true 11072 } 11073 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 11074 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11075 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11076 for { 11077 off1 := v.AuxInt 11078 sym1 := v.Aux 11079 _ = v.Args[2] 11080 v_0 := v.Args[0] 11081 if v_0.Op != OpAMD64LEAQ8 { 11082 break 11083 } 11084 off2 := v_0.AuxInt 11085 sym2 := v_0.Aux 11086 _ = v_0.Args[1] 11087 ptr := v_0.Args[0] 11088 idx := v_0.Args[1] 11089 val := v.Args[1] 11090 mem := v.Args[2] 11091 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11092 break 11093 } 11094 v.reset(OpAMD64MOVSDstoreidx8) 11095 v.AuxInt = off1 + off2 11096 v.Aux = mergeSym(sym1, sym2) 11097 v.AddArg(ptr) 11098 v.AddArg(idx) 11099 v.AddArg(val) 11100 v.AddArg(mem) 11101 return true 11102 } 11103 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 11104 // cond: ptr.Op != OpSB 11105 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 11106 for { 11107 off := v.AuxInt 11108 sym := v.Aux 11109 _ = v.Args[2] 11110 v_0 := v.Args[0] 11111 if v_0.Op != OpAMD64ADDQ { 11112 break 11113 } 11114 _ = v_0.Args[1] 11115 ptr := v_0.Args[0] 11116 idx := v_0.Args[1] 11117 val := v.Args[1] 11118 mem := v.Args[2] 11119 if !(ptr.Op != OpSB) { 11120 break 11121 } 11122 v.reset(OpAMD64MOVSDstoreidx1) 11123 v.AuxInt = off 11124 v.Aux = sym 11125 v.AddArg(ptr) 11126 v.AddArg(idx) 11127 v.AddArg(val) 11128 v.AddArg(mem) 11129 return true 11130 } 11131 // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) 11132 // cond: 11133 // result: (MOVQstore [off] {sym} ptr val mem) 11134 for { 11135 off := v.AuxInt 11136 sym := v.Aux 11137 _ = v.Args[2] 11138 ptr := v.Args[0] 11139 v_1 := v.Args[1] 11140 if v_1.Op != OpAMD64MOVQi2f { 11141 break 11142 } 11143 val := v_1.Args[0] 11144 mem := v.Args[2] 11145 v.reset(OpAMD64MOVQstore) 11146 v.AuxInt = off 11147 v.Aux = sym 11148 v.AddArg(ptr) 11149 v.AddArg(val) 11150 v.AddArg(mem) 11151 return true 11152 } 11153 return false 11154 } 11155 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { 11156 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 11157 // cond: 11158 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 11159 for { 11160 c := v.AuxInt 11161 sym := v.Aux 11162 _ = v.Args[3] 11163 ptr := v.Args[0] 11164 v_1 := 
v.Args[1] 11165 if v_1.Op != OpAMD64SHLQconst { 11166 break 11167 } 11168 if v_1.AuxInt != 3 { 11169 break 11170 } 11171 idx := v_1.Args[0] 11172 val := v.Args[2] 11173 mem := v.Args[3] 11174 v.reset(OpAMD64MOVSDstoreidx8) 11175 v.AuxInt = c 11176 v.Aux = sym 11177 v.AddArg(ptr) 11178 v.AddArg(idx) 11179 v.AddArg(val) 11180 v.AddArg(mem) 11181 return true 11182 } 11183 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 11184 // cond: is32Bit(c+d) 11185 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 11186 for { 11187 c := v.AuxInt 11188 sym := v.Aux 11189 _ = v.Args[3] 11190 v_0 := v.Args[0] 11191 if v_0.Op != OpAMD64ADDQconst { 11192 break 11193 } 11194 d := v_0.AuxInt 11195 ptr := v_0.Args[0] 11196 idx := v.Args[1] 11197 val := v.Args[2] 11198 mem := v.Args[3] 11199 if !(is32Bit(c + d)) { 11200 break 11201 } 11202 v.reset(OpAMD64MOVSDstoreidx1) 11203 v.AuxInt = c + d 11204 v.Aux = sym 11205 v.AddArg(ptr) 11206 v.AddArg(idx) 11207 v.AddArg(val) 11208 v.AddArg(mem) 11209 return true 11210 } 11211 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 11212 // cond: is32Bit(c+d) 11213 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 11214 for { 11215 c := v.AuxInt 11216 sym := v.Aux 11217 _ = v.Args[3] 11218 ptr := v.Args[0] 11219 v_1 := v.Args[1] 11220 if v_1.Op != OpAMD64ADDQconst { 11221 break 11222 } 11223 d := v_1.AuxInt 11224 idx := v_1.Args[0] 11225 val := v.Args[2] 11226 mem := v.Args[3] 11227 if !(is32Bit(c + d)) { 11228 break 11229 } 11230 v.reset(OpAMD64MOVSDstoreidx1) 11231 v.AuxInt = c + d 11232 v.Aux = sym 11233 v.AddArg(ptr) 11234 v.AddArg(idx) 11235 v.AddArg(val) 11236 v.AddArg(mem) 11237 return true 11238 } 11239 return false 11240 } 11241 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { 11242 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 11243 // cond: is32Bit(c+d) 11244 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 11245 for { 11246 c := v.AuxInt 11247 sym := v.Aux 11248 _ = v.Args[3] 11249 v_0 := v.Args[0] 11250 if v_0.Op != OpAMD64ADDQconst { 11251 break 11252 } 11253 d := v_0.AuxInt 11254 ptr := v_0.Args[0] 11255 idx := v.Args[1] 11256 val := v.Args[2] 11257 mem := v.Args[3] 11258 if !(is32Bit(c + d)) { 11259 break 11260 } 11261 v.reset(OpAMD64MOVSDstoreidx8) 11262 v.AuxInt = c + d 11263 v.Aux = sym 11264 v.AddArg(ptr) 11265 v.AddArg(idx) 11266 v.AddArg(val) 11267 v.AddArg(mem) 11268 return true 11269 } 11270 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 11271 // cond: is32Bit(c+8*d) 11272 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 11273 for { 11274 c := v.AuxInt 11275 sym := v.Aux 11276 _ = v.Args[3] 11277 ptr := v.Args[0] 11278 v_1 := v.Args[1] 11279 if v_1.Op != OpAMD64ADDQconst { 11280 break 11281 } 11282 d := v_1.AuxInt 11283 idx := v_1.Args[0] 11284 val := v.Args[2] 11285 mem := v.Args[3] 11286 if !(is32Bit(c + 8*d)) { 11287 break 11288 } 11289 v.reset(OpAMD64MOVSDstoreidx8) 11290 v.AuxInt = c + 8*d 11291 v.Aux = sym 11292 v.AddArg(ptr) 11293 v.AddArg(idx) 11294 v.AddArg(val) 11295 v.AddArg(mem) 11296 return true 11297 } 11298 return false 11299 } 11300 func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { 11301 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 11302 // cond: is32Bit(off1+off2) 11303 // result: (MOVSSload [off1+off2] {sym} ptr mem) 11304 for { 11305 off1 := v.AuxInt 11306 sym := v.Aux 11307 _ = v.Args[1] 11308 v_0 := v.Args[0] 11309 if v_0.Op != OpAMD64ADDQconst { 11310 break 11311 } 11312 off2 := 
v_0.AuxInt 11313 ptr := v_0.Args[0] 11314 mem := v.Args[1] 11315 if !(is32Bit(off1 + off2)) { 11316 break 11317 } 11318 v.reset(OpAMD64MOVSSload) 11319 v.AuxInt = off1 + off2 11320 v.Aux = sym 11321 v.AddArg(ptr) 11322 v.AddArg(mem) 11323 return true 11324 } 11325 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 11326 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11327 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 11328 for { 11329 off1 := v.AuxInt 11330 sym1 := v.Aux 11331 _ = v.Args[1] 11332 v_0 := v.Args[0] 11333 if v_0.Op != OpAMD64LEAQ { 11334 break 11335 } 11336 off2 := v_0.AuxInt 11337 sym2 := v_0.Aux 11338 base := v_0.Args[0] 11339 mem := v.Args[1] 11340 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11341 break 11342 } 11343 v.reset(OpAMD64MOVSSload) 11344 v.AuxInt = off1 + off2 11345 v.Aux = mergeSym(sym1, sym2) 11346 v.AddArg(base) 11347 v.AddArg(mem) 11348 return true 11349 } 11350 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 11351 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11352 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 11353 for { 11354 off1 := v.AuxInt 11355 sym1 := v.Aux 11356 _ = v.Args[1] 11357 v_0 := v.Args[0] 11358 if v_0.Op != OpAMD64LEAQ1 { 11359 break 11360 } 11361 off2 := v_0.AuxInt 11362 sym2 := v_0.Aux 11363 _ = v_0.Args[1] 11364 ptr := v_0.Args[0] 11365 idx := v_0.Args[1] 11366 mem := v.Args[1] 11367 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11368 break 11369 } 11370 v.reset(OpAMD64MOVSSloadidx1) 11371 v.AuxInt = off1 + off2 11372 v.Aux = mergeSym(sym1, sym2) 11373 v.AddArg(ptr) 11374 v.AddArg(idx) 11375 v.AddArg(mem) 11376 return true 11377 } 11378 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 11379 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11380 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 11381 for { 11382 off1 := v.AuxInt 11383 sym1 := v.Aux 11384 _ = v.Args[1] 11385 v_0 := v.Args[0] 11386 if v_0.Op != OpAMD64LEAQ4 { 11387 break 11388 } 11389 off2 := v_0.AuxInt 11390 sym2 := v_0.Aux 11391 _ = v_0.Args[1] 11392 ptr := v_0.Args[0] 11393 idx := v_0.Args[1] 11394 mem := v.Args[1] 11395 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11396 break 11397 } 11398 v.reset(OpAMD64MOVSSloadidx4) 11399 v.AuxInt = off1 + off2 11400 v.Aux = mergeSym(sym1, sym2) 11401 v.AddArg(ptr) 11402 v.AddArg(idx) 11403 v.AddArg(mem) 11404 return true 11405 } 11406 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 11407 // cond: ptr.Op != OpSB 11408 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 11409 for { 11410 off := v.AuxInt 11411 sym := v.Aux 11412 _ = v.Args[1] 11413 v_0 := v.Args[0] 11414 if v_0.Op != OpAMD64ADDQ { 11415 break 11416 } 11417 _ = v_0.Args[1] 11418 ptr := v_0.Args[0] 11419 idx := v_0.Args[1] 11420 mem := v.Args[1] 11421 if !(ptr.Op != OpSB) { 11422 break 11423 } 11424 v.reset(OpAMD64MOVSSloadidx1) 11425 v.AuxInt = off 11426 v.Aux = sym 11427 v.AddArg(ptr) 11428 v.AddArg(idx) 11429 v.AddArg(mem) 11430 return true 11431 } 11432 // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) 11433 // cond: 11434 // result: (MOVLi2f val) 11435 for { 11436 off := v.AuxInt 11437 sym := v.Aux 11438 _ = v.Args[1] 11439 ptr := v.Args[0] 11440 v_1 := v.Args[1] 11441 if v_1.Op != OpAMD64MOVLstore { 11442 break 11443 } 11444 if v_1.AuxInt != off { 11445 break 11446 } 11447 if v_1.Aux != sym { 11448 break 11449 } 11450 _ = v_1.Args[2] 11451 if ptr != v_1.Args[0] { 
11452 break 11453 } 11454 val := v_1.Args[1] 11455 v.reset(OpAMD64MOVLi2f) 11456 v.AddArg(val) 11457 return true 11458 } 11459 return false 11460 } 11461 func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { 11462 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 11463 // cond: 11464 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 11465 for { 11466 c := v.AuxInt 11467 sym := v.Aux 11468 _ = v.Args[2] 11469 ptr := v.Args[0] 11470 v_1 := v.Args[1] 11471 if v_1.Op != OpAMD64SHLQconst { 11472 break 11473 } 11474 if v_1.AuxInt != 2 { 11475 break 11476 } 11477 idx := v_1.Args[0] 11478 mem := v.Args[2] 11479 v.reset(OpAMD64MOVSSloadidx4) 11480 v.AuxInt = c 11481 v.Aux = sym 11482 v.AddArg(ptr) 11483 v.AddArg(idx) 11484 v.AddArg(mem) 11485 return true 11486 } 11487 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 11488 // cond: is32Bit(c+d) 11489 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 11490 for { 11491 c := v.AuxInt 11492 sym := v.Aux 11493 _ = v.Args[2] 11494 v_0 := v.Args[0] 11495 if v_0.Op != OpAMD64ADDQconst { 11496 break 11497 } 11498 d := v_0.AuxInt 11499 ptr := v_0.Args[0] 11500 idx := v.Args[1] 11501 mem := v.Args[2] 11502 if !(is32Bit(c + d)) { 11503 break 11504 } 11505 v.reset(OpAMD64MOVSSloadidx1) 11506 v.AuxInt = c + d 11507 v.Aux = sym 11508 v.AddArg(ptr) 11509 v.AddArg(idx) 11510 v.AddArg(mem) 11511 return true 11512 } 11513 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 11514 // cond: is32Bit(c+d) 11515 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 11516 for { 11517 c := v.AuxInt 11518 sym := v.Aux 11519 _ = v.Args[2] 11520 ptr := v.Args[0] 11521 v_1 := v.Args[1] 11522 if v_1.Op != OpAMD64ADDQconst { 11523 break 11524 } 11525 d := v_1.AuxInt 11526 idx := v_1.Args[0] 11527 mem := v.Args[2] 11528 if !(is32Bit(c + d)) { 11529 break 11530 } 11531 v.reset(OpAMD64MOVSSloadidx1) 11532 v.AuxInt = c + d 11533 v.Aux = sym 11534 v.AddArg(ptr) 11535 v.AddArg(idx) 11536 v.AddArg(mem) 11537 return true 11538 } 11539 return false 11540 } 11541 func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { 11542 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 11543 // cond: is32Bit(c+d) 11544 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 11545 for { 11546 c := v.AuxInt 11547 sym := v.Aux 11548 _ = v.Args[2] 11549 v_0 := v.Args[0] 11550 if v_0.Op != OpAMD64ADDQconst { 11551 break 11552 } 11553 d := v_0.AuxInt 11554 ptr := v_0.Args[0] 11555 idx := v.Args[1] 11556 mem := v.Args[2] 11557 if !(is32Bit(c + d)) { 11558 break 11559 } 11560 v.reset(OpAMD64MOVSSloadidx4) 11561 v.AuxInt = c + d 11562 v.Aux = sym 11563 v.AddArg(ptr) 11564 v.AddArg(idx) 11565 v.AddArg(mem) 11566 return true 11567 } 11568 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 11569 // cond: is32Bit(c+4*d) 11570 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 11571 for { 11572 c := v.AuxInt 11573 sym := v.Aux 11574 _ = v.Args[2] 11575 ptr := v.Args[0] 11576 v_1 := v.Args[1] 11577 if v_1.Op != OpAMD64ADDQconst { 11578 break 11579 } 11580 d := v_1.AuxInt 11581 idx := v_1.Args[0] 11582 mem := v.Args[2] 11583 if !(is32Bit(c + 4*d)) { 11584 break 11585 } 11586 v.reset(OpAMD64MOVSSloadidx4) 11587 v.AuxInt = c + 4*d 11588 v.Aux = sym 11589 v.AddArg(ptr) 11590 v.AddArg(idx) 11591 v.AddArg(mem) 11592 return true 11593 } 11594 return false 11595 } 11596 func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { 11597 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 11598 // cond: is32Bit(off1+off2) 11599 // result: 
(MOVSSstore [off1+off2] {sym} ptr val mem) 11600 for { 11601 off1 := v.AuxInt 11602 sym := v.Aux 11603 _ = v.Args[2] 11604 v_0 := v.Args[0] 11605 if v_0.Op != OpAMD64ADDQconst { 11606 break 11607 } 11608 off2 := v_0.AuxInt 11609 ptr := v_0.Args[0] 11610 val := v.Args[1] 11611 mem := v.Args[2] 11612 if !(is32Bit(off1 + off2)) { 11613 break 11614 } 11615 v.reset(OpAMD64MOVSSstore) 11616 v.AuxInt = off1 + off2 11617 v.Aux = sym 11618 v.AddArg(ptr) 11619 v.AddArg(val) 11620 v.AddArg(mem) 11621 return true 11622 } 11623 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11624 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11625 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11626 for { 11627 off1 := v.AuxInt 11628 sym1 := v.Aux 11629 _ = v.Args[2] 11630 v_0 := v.Args[0] 11631 if v_0.Op != OpAMD64LEAQ { 11632 break 11633 } 11634 off2 := v_0.AuxInt 11635 sym2 := v_0.Aux 11636 base := v_0.Args[0] 11637 val := v.Args[1] 11638 mem := v.Args[2] 11639 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11640 break 11641 } 11642 v.reset(OpAMD64MOVSSstore) 11643 v.AuxInt = off1 + off2 11644 v.Aux = mergeSym(sym1, sym2) 11645 v.AddArg(base) 11646 v.AddArg(val) 11647 v.AddArg(mem) 11648 return true 11649 } 11650 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 11651 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11652 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11653 for { 11654 off1 := v.AuxInt 11655 sym1 := v.Aux 11656 _ = v.Args[2] 11657 v_0 := v.Args[0] 11658 if v_0.Op != OpAMD64LEAQ1 { 11659 break 11660 } 11661 off2 := v_0.AuxInt 11662 sym2 := v_0.Aux 11663 _ = v_0.Args[1] 11664 ptr := v_0.Args[0] 11665 idx := v_0.Args[1] 11666 val := v.Args[1] 11667 mem := v.Args[2] 11668 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11669 break 11670 } 11671 v.reset(OpAMD64MOVSSstoreidx1) 11672 v.AuxInt = off1 + off2 11673 v.Aux = mergeSym(sym1, sym2) 11674 v.AddArg(ptr) 11675 v.AddArg(idx) 11676 v.AddArg(val) 11677 v.AddArg(mem) 11678 return true 11679 } 11680 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 11681 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11682 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11683 for { 11684 off1 := v.AuxInt 11685 sym1 := v.Aux 11686 _ = v.Args[2] 11687 v_0 := v.Args[0] 11688 if v_0.Op != OpAMD64LEAQ4 { 11689 break 11690 } 11691 off2 := v_0.AuxInt 11692 sym2 := v_0.Aux 11693 _ = v_0.Args[1] 11694 ptr := v_0.Args[0] 11695 idx := v_0.Args[1] 11696 val := v.Args[1] 11697 mem := v.Args[2] 11698 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11699 break 11700 } 11701 v.reset(OpAMD64MOVSSstoreidx4) 11702 v.AuxInt = off1 + off2 11703 v.Aux = mergeSym(sym1, sym2) 11704 v.AddArg(ptr) 11705 v.AddArg(idx) 11706 v.AddArg(val) 11707 v.AddArg(mem) 11708 return true 11709 } 11710 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 11711 // cond: ptr.Op != OpSB 11712 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 11713 for { 11714 off := v.AuxInt 11715 sym := v.Aux 11716 _ = v.Args[2] 11717 v_0 := v.Args[0] 11718 if v_0.Op != OpAMD64ADDQ { 11719 break 11720 } 11721 _ = v_0.Args[1] 11722 ptr := v_0.Args[0] 11723 idx := v_0.Args[1] 11724 val := v.Args[1] 11725 mem := v.Args[2] 11726 if !(ptr.Op != OpSB) { 11727 break 11728 } 11729 v.reset(OpAMD64MOVSSstoreidx1) 11730 v.AuxInt = off 11731 v.Aux = sym 11732 v.AddArg(ptr) 11733 v.AddArg(idx) 11734 v.AddArg(val) 11735 v.AddArg(mem) 
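// The ptr.Op != OpSB guard above matters because the static base
// pseudo-register SB is only legal as the base of an address, never as
// the index, so it must stay in the ptr slot of the indexed store.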
11736 return true 11737 } 11738 // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) 11739 // cond: 11740 // result: (MOVLstore [off] {sym} ptr val mem) 11741 for { 11742 off := v.AuxInt 11743 sym := v.Aux 11744 _ = v.Args[2] 11745 ptr := v.Args[0] 11746 v_1 := v.Args[1] 11747 if v_1.Op != OpAMD64MOVLi2f { 11748 break 11749 } 11750 val := v_1.Args[0] 11751 mem := v.Args[2] 11752 v.reset(OpAMD64MOVLstore) 11753 v.AuxInt = off 11754 v.Aux = sym 11755 v.AddArg(ptr) 11756 v.AddArg(val) 11757 v.AddArg(mem) 11758 return true 11759 } 11760 return false 11761 } 11762 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { 11763 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 11764 // cond: 11765 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 11766 for { 11767 c := v.AuxInt 11768 sym := v.Aux 11769 _ = v.Args[3] 11770 ptr := v.Args[0] 11771 v_1 := v.Args[1] 11772 if v_1.Op != OpAMD64SHLQconst { 11773 break 11774 } 11775 if v_1.AuxInt != 2 { 11776 break 11777 } 11778 idx := v_1.Args[0] 11779 val := v.Args[2] 11780 mem := v.Args[3] 11781 v.reset(OpAMD64MOVSSstoreidx4) 11782 v.AuxInt = c 11783 v.Aux = sym 11784 v.AddArg(ptr) 11785 v.AddArg(idx) 11786 v.AddArg(val) 11787 v.AddArg(mem) 11788 return true 11789 } 11790 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 11791 // cond: is32Bit(c+d) 11792 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 11793 for { 11794 c := v.AuxInt 11795 sym := v.Aux 11796 _ = v.Args[3] 11797 v_0 := v.Args[0] 11798 if v_0.Op != OpAMD64ADDQconst { 11799 break 11800 } 11801 d := v_0.AuxInt 11802 ptr := v_0.Args[0] 11803 idx := v.Args[1] 11804 val := v.Args[2] 11805 mem := v.Args[3] 11806 if !(is32Bit(c + d)) { 11807 break 11808 } 11809 v.reset(OpAMD64MOVSSstoreidx1) 11810 v.AuxInt = c + d 11811 v.Aux = sym 11812 v.AddArg(ptr) 11813 v.AddArg(idx) 11814 v.AddArg(val) 11815 v.AddArg(mem) 11816 return true 11817 } 11818 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 11819 // cond: is32Bit(c+d) 11820 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 11821 for { 11822 c := v.AuxInt 11823 sym := v.Aux 11824 _ = v.Args[3] 11825 ptr := v.Args[0] 11826 v_1 := v.Args[1] 11827 if v_1.Op != OpAMD64ADDQconst { 11828 break 11829 } 11830 d := v_1.AuxInt 11831 idx := v_1.Args[0] 11832 val := v.Args[2] 11833 mem := v.Args[3] 11834 if !(is32Bit(c + d)) { 11835 break 11836 } 11837 v.reset(OpAMD64MOVSSstoreidx1) 11838 v.AuxInt = c + d 11839 v.Aux = sym 11840 v.AddArg(ptr) 11841 v.AddArg(idx) 11842 v.AddArg(val) 11843 v.AddArg(mem) 11844 return true 11845 } 11846 return false 11847 } 11848 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { 11849 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 11850 // cond: is32Bit(c+d) 11851 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 11852 for { 11853 c := v.AuxInt 11854 sym := v.Aux 11855 _ = v.Args[3] 11856 v_0 := v.Args[0] 11857 if v_0.Op != OpAMD64ADDQconst { 11858 break 11859 } 11860 d := v_0.AuxInt 11861 ptr := v_0.Args[0] 11862 idx := v.Args[1] 11863 val := v.Args[2] 11864 mem := v.Args[3] 11865 if !(is32Bit(c + d)) { 11866 break 11867 } 11868 v.reset(OpAMD64MOVSSstoreidx4) 11869 v.AuxInt = c + d 11870 v.Aux = sym 11871 v.AddArg(ptr) 11872 v.AddArg(idx) 11873 v.AddArg(val) 11874 v.AddArg(mem) 11875 return true 11876 } 11877 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 11878 // cond: is32Bit(c+4*d) 11879 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 11880 for { 11881 c := v.AuxInt 
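// Index-side constants must be pre-scaled by the element width before
// joining the displacement: folding ADDQconst [d] out of idx requires
// is32Bit(c+4*d) here. With illustrative values c=8, d=3, the result
// addresses [20](ptr)(idx*4).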
11882 sym := v.Aux 11883 _ = v.Args[3] 11884 ptr := v.Args[0] 11885 v_1 := v.Args[1] 11886 if v_1.Op != OpAMD64ADDQconst { 11887 break 11888 } 11889 d := v_1.AuxInt 11890 idx := v_1.Args[0] 11891 val := v.Args[2] 11892 mem := v.Args[3] 11893 if !(is32Bit(c + 4*d)) { 11894 break 11895 } 11896 v.reset(OpAMD64MOVSSstoreidx4) 11897 v.AuxInt = c + 4*d 11898 v.Aux = sym 11899 v.AddArg(ptr) 11900 v.AddArg(idx) 11901 v.AddArg(val) 11902 v.AddArg(mem) 11903 return true 11904 } 11905 return false 11906 } 11907 func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { 11908 b := v.Block 11909 _ = b 11910 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 11911 // cond: x.Uses == 1 && clobber(x) 11912 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 11913 for { 11914 x := v.Args[0] 11915 if x.Op != OpAMD64MOVWload { 11916 break 11917 } 11918 off := x.AuxInt 11919 sym := x.Aux 11920 _ = x.Args[1] 11921 ptr := x.Args[0] 11922 mem := x.Args[1] 11923 if !(x.Uses == 1 && clobber(x)) { 11924 break 11925 } 11926 b = x.Block 11927 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 11928 v.reset(OpCopy) 11929 v.AddArg(v0) 11930 v0.AuxInt = off 11931 v0.Aux = sym 11932 v0.AddArg(ptr) 11933 v0.AddArg(mem) 11934 return true 11935 } 11936 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 11937 // cond: x.Uses == 1 && clobber(x) 11938 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 11939 for { 11940 x := v.Args[0] 11941 if x.Op != OpAMD64MOVLload { 11942 break 11943 } 11944 off := x.AuxInt 11945 sym := x.Aux 11946 _ = x.Args[1] 11947 ptr := x.Args[0] 11948 mem := x.Args[1] 11949 if !(x.Uses == 1 && clobber(x)) { 11950 break 11951 } 11952 b = x.Block 11953 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 11954 v.reset(OpCopy) 11955 v.AddArg(v0) 11956 v0.AuxInt = off 11957 v0.Aux = sym 11958 v0.AddArg(ptr) 11959 v0.AddArg(mem) 11960 return true 11961 } 11962 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 11963 // cond: x.Uses == 1 && clobber(x) 11964 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 11965 for { 11966 x := v.Args[0] 11967 if x.Op != OpAMD64MOVQload { 11968 break 11969 } 11970 off := x.AuxInt 11971 sym := x.Aux 11972 _ = x.Args[1] 11973 ptr := x.Args[0] 11974 mem := x.Args[1] 11975 if !(x.Uses == 1 && clobber(x)) { 11976 break 11977 } 11978 b = x.Block 11979 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 11980 v.reset(OpCopy) 11981 v.AddArg(v0) 11982 v0.AuxInt = off 11983 v0.Aux = sym 11984 v0.AddArg(ptr) 11985 v0.AddArg(mem) 11986 return true 11987 } 11988 // match: (MOVWQSX (ANDLconst [c] x)) 11989 // cond: c & 0x8000 == 0 11990 // result: (ANDLconst [c & 0x7fff] x) 11991 for { 11992 v_0 := v.Args[0] 11993 if v_0.Op != OpAMD64ANDLconst { 11994 break 11995 } 11996 c := v_0.AuxInt 11997 x := v_0.Args[0] 11998 if !(c&0x8000 == 0) { 11999 break 12000 } 12001 v.reset(OpAMD64ANDLconst) 12002 v.AuxInt = c & 0x7fff 12003 v.AddArg(x) 12004 return true 12005 } 12006 // match: (MOVWQSX x:(MOVWQSX _)) 12007 // cond: 12008 // result: x 12009 for { 12010 x := v.Args[0] 12011 if x.Op != OpAMD64MOVWQSX { 12012 break 12013 } 12014 v.reset(OpCopy) 12015 v.Type = x.Type 12016 v.AddArg(x) 12017 return true 12018 } 12019 // match: (MOVWQSX x:(MOVBQSX _)) 12020 // cond: 12021 // result: x 12022 for { 12023 x := v.Args[0] 12024 if x.Op != OpAMD64MOVBQSX { 12025 break 12026 } 12027 v.reset(OpCopy) 12028 v.Type = x.Type 12029 v.AddArg(x) 12030 return true 12031 } 12032 return false 12033 } 12034 func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { 12035 
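// The first rule below is store-to-load forwarding: a sign-extending
// load that reads back a location just written by a MOVWstore to the
// same pointer and offset skips memory entirely and sign-extends the
// stored value directly.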
// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12036 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12037 // result: (MOVWQSX x) 12038 for { 12039 off := v.AuxInt 12040 sym := v.Aux 12041 _ = v.Args[1] 12042 ptr := v.Args[0] 12043 v_1 := v.Args[1] 12044 if v_1.Op != OpAMD64MOVWstore { 12045 break 12046 } 12047 off2 := v_1.AuxInt 12048 sym2 := v_1.Aux 12049 _ = v_1.Args[2] 12050 ptr2 := v_1.Args[0] 12051 x := v_1.Args[1] 12052 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12053 break 12054 } 12055 v.reset(OpAMD64MOVWQSX) 12056 v.AddArg(x) 12057 return true 12058 } 12059 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12060 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12061 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12062 for { 12063 off1 := v.AuxInt 12064 sym1 := v.Aux 12065 _ = v.Args[1] 12066 v_0 := v.Args[0] 12067 if v_0.Op != OpAMD64LEAQ { 12068 break 12069 } 12070 off2 := v_0.AuxInt 12071 sym2 := v_0.Aux 12072 base := v_0.Args[0] 12073 mem := v.Args[1] 12074 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12075 break 12076 } 12077 v.reset(OpAMD64MOVWQSXload) 12078 v.AuxInt = off1 + off2 12079 v.Aux = mergeSym(sym1, sym2) 12080 v.AddArg(base) 12081 v.AddArg(mem) 12082 return true 12083 } 12084 return false 12085 } 12086 func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { 12087 b := v.Block 12088 _ = b 12089 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 12090 // cond: x.Uses == 1 && clobber(x) 12091 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12092 for { 12093 x := v.Args[0] 12094 if x.Op != OpAMD64MOVWload { 12095 break 12096 } 12097 off := x.AuxInt 12098 sym := x.Aux 12099 _ = x.Args[1] 12100 ptr := x.Args[0] 12101 mem := x.Args[1] 12102 if !(x.Uses == 1 && clobber(x)) { 12103 break 12104 } 12105 b = x.Block 12106 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 12107 v.reset(OpCopy) 12108 v.AddArg(v0) 12109 v0.AuxInt = off 12110 v0.Aux = sym 12111 v0.AddArg(ptr) 12112 v0.AddArg(mem) 12113 return true 12114 } 12115 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 12116 // cond: x.Uses == 1 && clobber(x) 12117 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12118 for { 12119 x := v.Args[0] 12120 if x.Op != OpAMD64MOVLload { 12121 break 12122 } 12123 off := x.AuxInt 12124 sym := x.Aux 12125 _ = x.Args[1] 12126 ptr := x.Args[0] 12127 mem := x.Args[1] 12128 if !(x.Uses == 1 && clobber(x)) { 12129 break 12130 } 12131 b = x.Block 12132 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 12133 v.reset(OpCopy) 12134 v.AddArg(v0) 12135 v0.AuxInt = off 12136 v0.Aux = sym 12137 v0.AddArg(ptr) 12138 v0.AddArg(mem) 12139 return true 12140 } 12141 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 12142 // cond: x.Uses == 1 && clobber(x) 12143 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12144 for { 12145 x := v.Args[0] 12146 if x.Op != OpAMD64MOVQload { 12147 break 12148 } 12149 off := x.AuxInt 12150 sym := x.Aux 12151 _ = x.Args[1] 12152 ptr := x.Args[0] 12153 mem := x.Args[1] 12154 if !(x.Uses == 1 && clobber(x)) { 12155 break 12156 } 12157 b = x.Block 12158 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 12159 v.reset(OpCopy) 12160 v.AddArg(v0) 12161 v0.AuxInt = off 12162 v0.Aux = sym 12163 v0.AddArg(ptr) 12164 v0.AddArg(mem) 12165 return true 12166 } 12167 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 12168 // cond: x.Uses == 1 && clobber(x) 12169 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr 
idx mem) 12170 for { 12171 x := v.Args[0] 12172 if x.Op != OpAMD64MOVWloadidx1 { 12173 break 12174 } 12175 off := x.AuxInt 12176 sym := x.Aux 12177 _ = x.Args[2] 12178 ptr := x.Args[0] 12179 idx := x.Args[1] 12180 mem := x.Args[2] 12181 if !(x.Uses == 1 && clobber(x)) { 12182 break 12183 } 12184 b = x.Block 12185 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 12186 v.reset(OpCopy) 12187 v.AddArg(v0) 12188 v0.AuxInt = off 12189 v0.Aux = sym 12190 v0.AddArg(ptr) 12191 v0.AddArg(idx) 12192 v0.AddArg(mem) 12193 return true 12194 } 12195 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 12196 // cond: x.Uses == 1 && clobber(x) 12197 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 12198 for { 12199 x := v.Args[0] 12200 if x.Op != OpAMD64MOVWloadidx2 { 12201 break 12202 } 12203 off := x.AuxInt 12204 sym := x.Aux 12205 _ = x.Args[2] 12206 ptr := x.Args[0] 12207 idx := x.Args[1] 12208 mem := x.Args[2] 12209 if !(x.Uses == 1 && clobber(x)) { 12210 break 12211 } 12212 b = x.Block 12213 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 12214 v.reset(OpCopy) 12215 v.AddArg(v0) 12216 v0.AuxInt = off 12217 v0.Aux = sym 12218 v0.AddArg(ptr) 12219 v0.AddArg(idx) 12220 v0.AddArg(mem) 12221 return true 12222 } 12223 // match: (MOVWQZX (ANDLconst [c] x)) 12224 // cond: 12225 // result: (ANDLconst [c & 0xffff] x) 12226 for { 12227 v_0 := v.Args[0] 12228 if v_0.Op != OpAMD64ANDLconst { 12229 break 12230 } 12231 c := v_0.AuxInt 12232 x := v_0.Args[0] 12233 v.reset(OpAMD64ANDLconst) 12234 v.AuxInt = c & 0xffff 12235 v.AddArg(x) 12236 return true 12237 } 12238 // match: (MOVWQZX x:(MOVWQZX _)) 12239 // cond: 12240 // result: x 12241 for { 12242 x := v.Args[0] 12243 if x.Op != OpAMD64MOVWQZX { 12244 break 12245 } 12246 v.reset(OpCopy) 12247 v.Type = x.Type 12248 v.AddArg(x) 12249 return true 12250 } 12251 // match: (MOVWQZX x:(MOVBQZX _)) 12252 // cond: 12253 // result: x 12254 for { 12255 x := v.Args[0] 12256 if x.Op != OpAMD64MOVBQZX { 12257 break 12258 } 12259 v.reset(OpCopy) 12260 v.Type = x.Type 12261 v.AddArg(x) 12262 return true 12263 } 12264 return false 12265 } 12266 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 12267 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12268 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12269 // result: (MOVWQZX x) 12270 for { 12271 off := v.AuxInt 12272 sym := v.Aux 12273 _ = v.Args[1] 12274 ptr := v.Args[0] 12275 v_1 := v.Args[1] 12276 if v_1.Op != OpAMD64MOVWstore { 12277 break 12278 } 12279 off2 := v_1.AuxInt 12280 sym2 := v_1.Aux 12281 _ = v_1.Args[2] 12282 ptr2 := v_1.Args[0] 12283 x := v_1.Args[1] 12284 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12285 break 12286 } 12287 v.reset(OpAMD64MOVWQZX) 12288 v.AddArg(x) 12289 return true 12290 } 12291 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 12292 // cond: is32Bit(off1+off2) 12293 // result: (MOVWload [off1+off2] {sym} ptr mem) 12294 for { 12295 off1 := v.AuxInt 12296 sym := v.Aux 12297 _ = v.Args[1] 12298 v_0 := v.Args[0] 12299 if v_0.Op != OpAMD64ADDQconst { 12300 break 12301 } 12302 off2 := v_0.AuxInt 12303 ptr := v_0.Args[0] 12304 mem := v.Args[1] 12305 if !(is32Bit(off1 + off2)) { 12306 break 12307 } 12308 v.reset(OpAMD64MOVWload) 12309 v.AuxInt = off1 + off2 12310 v.Aux = sym 12311 v.AddArg(ptr) 12312 v.AddArg(mem) 12313 return true 12314 } 12315 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12316 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12317 // result: (MOVWload 
[off1+off2] {mergeSym(sym1,sym2)} base mem) 12318 for { 12319 off1 := v.AuxInt 12320 sym1 := v.Aux 12321 _ = v.Args[1] 12322 v_0 := v.Args[0] 12323 if v_0.Op != OpAMD64LEAQ { 12324 break 12325 } 12326 off2 := v_0.AuxInt 12327 sym2 := v_0.Aux 12328 base := v_0.Args[0] 12329 mem := v.Args[1] 12330 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12331 break 12332 } 12333 v.reset(OpAMD64MOVWload) 12334 v.AuxInt = off1 + off2 12335 v.Aux = mergeSym(sym1, sym2) 12336 v.AddArg(base) 12337 v.AddArg(mem) 12338 return true 12339 } 12340 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 12341 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12342 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12343 for { 12344 off1 := v.AuxInt 12345 sym1 := v.Aux 12346 _ = v.Args[1] 12347 v_0 := v.Args[0] 12348 if v_0.Op != OpAMD64LEAQ1 { 12349 break 12350 } 12351 off2 := v_0.AuxInt 12352 sym2 := v_0.Aux 12353 _ = v_0.Args[1] 12354 ptr := v_0.Args[0] 12355 idx := v_0.Args[1] 12356 mem := v.Args[1] 12357 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12358 break 12359 } 12360 v.reset(OpAMD64MOVWloadidx1) 12361 v.AuxInt = off1 + off2 12362 v.Aux = mergeSym(sym1, sym2) 12363 v.AddArg(ptr) 12364 v.AddArg(idx) 12365 v.AddArg(mem) 12366 return true 12367 } 12368 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 12369 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12370 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12371 for { 12372 off1 := v.AuxInt 12373 sym1 := v.Aux 12374 _ = v.Args[1] 12375 v_0 := v.Args[0] 12376 if v_0.Op != OpAMD64LEAQ2 { 12377 break 12378 } 12379 off2 := v_0.AuxInt 12380 sym2 := v_0.Aux 12381 _ = v_0.Args[1] 12382 ptr := v_0.Args[0] 12383 idx := v_0.Args[1] 12384 mem := v.Args[1] 12385 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12386 break 12387 } 12388 v.reset(OpAMD64MOVWloadidx2) 12389 v.AuxInt = off1 + off2 12390 v.Aux = mergeSym(sym1, sym2) 12391 v.AddArg(ptr) 12392 v.AddArg(idx) 12393 v.AddArg(mem) 12394 return true 12395 } 12396 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 12397 // cond: ptr.Op != OpSB 12398 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 12399 for { 12400 off := v.AuxInt 12401 sym := v.Aux 12402 _ = v.Args[1] 12403 v_0 := v.Args[0] 12404 if v_0.Op != OpAMD64ADDQ { 12405 break 12406 } 12407 _ = v_0.Args[1] 12408 ptr := v_0.Args[0] 12409 idx := v_0.Args[1] 12410 mem := v.Args[1] 12411 if !(ptr.Op != OpSB) { 12412 break 12413 } 12414 v.reset(OpAMD64MOVWloadidx1) 12415 v.AuxInt = off 12416 v.Aux = sym 12417 v.AddArg(ptr) 12418 v.AddArg(idx) 12419 v.AddArg(mem) 12420 return true 12421 } 12422 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 12423 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 12424 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12425 for { 12426 off1 := v.AuxInt 12427 sym1 := v.Aux 12428 _ = v.Args[1] 12429 v_0 := v.Args[0] 12430 if v_0.Op != OpAMD64LEAL { 12431 break 12432 } 12433 off2 := v_0.AuxInt 12434 sym2 := v_0.Aux 12435 base := v_0.Args[0] 12436 mem := v.Args[1] 12437 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 12438 break 12439 } 12440 v.reset(OpAMD64MOVWload) 12441 v.AuxInt = off1 + off2 12442 v.Aux = mergeSym(sym1, sym2) 12443 v.AddArg(base) 12444 v.AddArg(mem) 12445 return true 12446 } 12447 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 12448 // cond: is32Bit(off1+off2) 12449 // result: (MOVWload [off1+off2] {sym} ptr mem) 12450 for { 12451 off1 := 
v.AuxInt 12452 sym := v.Aux 12453 _ = v.Args[1] 12454 v_0 := v.Args[0] 12455 if v_0.Op != OpAMD64ADDLconst { 12456 break 12457 } 12458 off2 := v_0.AuxInt 12459 ptr := v_0.Args[0] 12460 mem := v.Args[1] 12461 if !(is32Bit(off1 + off2)) { 12462 break 12463 } 12464 v.reset(OpAMD64MOVWload) 12465 v.AuxInt = off1 + off2 12466 v.Aux = sym 12467 v.AddArg(ptr) 12468 v.AddArg(mem) 12469 return true 12470 } 12471 return false 12472 } 12473 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 12474 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 12475 // cond: 12476 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 12477 for { 12478 c := v.AuxInt 12479 sym := v.Aux 12480 _ = v.Args[2] 12481 ptr := v.Args[0] 12482 v_1 := v.Args[1] 12483 if v_1.Op != OpAMD64SHLQconst { 12484 break 12485 } 12486 if v_1.AuxInt != 1 { 12487 break 12488 } 12489 idx := v_1.Args[0] 12490 mem := v.Args[2] 12491 v.reset(OpAMD64MOVWloadidx2) 12492 v.AuxInt = c 12493 v.Aux = sym 12494 v.AddArg(ptr) 12495 v.AddArg(idx) 12496 v.AddArg(mem) 12497 return true 12498 } 12499 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) 12500 // cond: 12501 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 12502 for { 12503 c := v.AuxInt 12504 sym := v.Aux 12505 _ = v.Args[2] 12506 v_0 := v.Args[0] 12507 if v_0.Op != OpAMD64SHLQconst { 12508 break 12509 } 12510 if v_0.AuxInt != 1 { 12511 break 12512 } 12513 idx := v_0.Args[0] 12514 ptr := v.Args[1] 12515 mem := v.Args[2] 12516 v.reset(OpAMD64MOVWloadidx2) 12517 v.AuxInt = c 12518 v.Aux = sym 12519 v.AddArg(ptr) 12520 v.AddArg(idx) 12521 v.AddArg(mem) 12522 return true 12523 } 12524 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 12525 // cond: is32Bit(c+d) 12526 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12527 for { 12528 c := v.AuxInt 12529 sym := v.Aux 12530 _ = v.Args[2] 12531 v_0 := v.Args[0] 12532 if v_0.Op != OpAMD64ADDQconst { 12533 break 12534 } 12535 d := v_0.AuxInt 12536 ptr := v_0.Args[0] 12537 idx := v.Args[1] 12538 mem := v.Args[2] 12539 if !(is32Bit(c + d)) { 12540 break 12541 } 12542 v.reset(OpAMD64MOVWloadidx1) 12543 v.AuxInt = c + d 12544 v.Aux = sym 12545 v.AddArg(ptr) 12546 v.AddArg(idx) 12547 v.AddArg(mem) 12548 return true 12549 } 12550 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 12551 // cond: is32Bit(c+d) 12552 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12553 for { 12554 c := v.AuxInt 12555 sym := v.Aux 12556 _ = v.Args[2] 12557 idx := v.Args[0] 12558 v_1 := v.Args[1] 12559 if v_1.Op != OpAMD64ADDQconst { 12560 break 12561 } 12562 d := v_1.AuxInt 12563 ptr := v_1.Args[0] 12564 mem := v.Args[2] 12565 if !(is32Bit(c + d)) { 12566 break 12567 } 12568 v.reset(OpAMD64MOVWloadidx1) 12569 v.AuxInt = c + d 12570 v.Aux = sym 12571 v.AddArg(ptr) 12572 v.AddArg(idx) 12573 v.AddArg(mem) 12574 return true 12575 } 12576 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 12577 // cond: is32Bit(c+d) 12578 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12579 for { 12580 c := v.AuxInt 12581 sym := v.Aux 12582 _ = v.Args[2] 12583 ptr := v.Args[0] 12584 v_1 := v.Args[1] 12585 if v_1.Op != OpAMD64ADDQconst { 12586 break 12587 } 12588 d := v_1.AuxInt 12589 idx := v_1.Args[0] 12590 mem := v.Args[2] 12591 if !(is32Bit(c + d)) { 12592 break 12593 } 12594 v.reset(OpAMD64MOVWloadidx1) 12595 v.AuxInt = c + d 12596 v.Aux = sym 12597 v.AddArg(ptr) 12598 v.AddArg(idx) 12599 v.AddArg(mem) 12600 return true 12601 } 12602 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 12603 // cond: is32Bit(c+d) 
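// This is the operand-swapped twin of the rule above: the two address
// operands of MOVWloadidx1 commute, so the ADDQconst fold is matched with
// the constant on either side. The is32Bit(c+d) guard keeps the merged
// displacement within a signed 32-bit immediate; illustratively, c=4 and
// d=8 fold to a single MOVWloadidx1 [12].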
12604 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12605 for { 12606 c := v.AuxInt 12607 sym := v.Aux 12608 _ = v.Args[2] 12609 v_0 := v.Args[0] 12610 if v_0.Op != OpAMD64ADDQconst { 12611 break 12612 } 12613 d := v_0.AuxInt 12614 idx := v_0.Args[0] 12615 ptr := v.Args[1] 12616 mem := v.Args[2] 12617 if !(is32Bit(c + d)) { 12618 break 12619 } 12620 v.reset(OpAMD64MOVWloadidx1) 12621 v.AuxInt = c + d 12622 v.Aux = sym 12623 v.AddArg(ptr) 12624 v.AddArg(idx) 12625 v.AddArg(mem) 12626 return true 12627 } 12628 return false 12629 } 12630 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 12631 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 12632 // cond: is32Bit(c+d) 12633 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 12634 for { 12635 c := v.AuxInt 12636 sym := v.Aux 12637 _ = v.Args[2] 12638 v_0 := v.Args[0] 12639 if v_0.Op != OpAMD64ADDQconst { 12640 break 12641 } 12642 d := v_0.AuxInt 12643 ptr := v_0.Args[0] 12644 idx := v.Args[1] 12645 mem := v.Args[2] 12646 if !(is32Bit(c + d)) { 12647 break 12648 } 12649 v.reset(OpAMD64MOVWloadidx2) 12650 v.AuxInt = c + d 12651 v.Aux = sym 12652 v.AddArg(ptr) 12653 v.AddArg(idx) 12654 v.AddArg(mem) 12655 return true 12656 } 12657 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 12658 // cond: is32Bit(c+2*d) 12659 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 12660 for { 12661 c := v.AuxInt 12662 sym := v.Aux 12663 _ = v.Args[2] 12664 ptr := v.Args[0] 12665 v_1 := v.Args[1] 12666 if v_1.Op != OpAMD64ADDQconst { 12667 break 12668 } 12669 d := v_1.AuxInt 12670 idx := v_1.Args[0] 12671 mem := v.Args[2] 12672 if !(is32Bit(c + 2*d)) { 12673 break 12674 } 12675 v.reset(OpAMD64MOVWloadidx2) 12676 v.AuxInt = c + 2*d 12677 v.Aux = sym 12678 v.AddArg(ptr) 12679 v.AddArg(idx) 12680 v.AddArg(mem) 12681 return true 12682 } 12683 return false 12684 } 12685 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 12686 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 12687 // cond: 12688 // result: (MOVWstore [off] {sym} ptr x mem) 12689 for { 12690 off := v.AuxInt 12691 sym := v.Aux 12692 _ = v.Args[2] 12693 ptr := v.Args[0] 12694 v_1 := v.Args[1] 12695 if v_1.Op != OpAMD64MOVWQSX { 12696 break 12697 } 12698 x := v_1.Args[0] 12699 mem := v.Args[2] 12700 v.reset(OpAMD64MOVWstore) 12701 v.AuxInt = off 12702 v.Aux = sym 12703 v.AddArg(ptr) 12704 v.AddArg(x) 12705 v.AddArg(mem) 12706 return true 12707 } 12708 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 12709 // cond: 12710 // result: (MOVWstore [off] {sym} ptr x mem) 12711 for { 12712 off := v.AuxInt 12713 sym := v.Aux 12714 _ = v.Args[2] 12715 ptr := v.Args[0] 12716 v_1 := v.Args[1] 12717 if v_1.Op != OpAMD64MOVWQZX { 12718 break 12719 } 12720 x := v_1.Args[0] 12721 mem := v.Args[2] 12722 v.reset(OpAMD64MOVWstore) 12723 v.AuxInt = off 12724 v.Aux = sym 12725 v.AddArg(ptr) 12726 v.AddArg(x) 12727 v.AddArg(mem) 12728 return true 12729 } 12730 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 12731 // cond: is32Bit(off1+off2) 12732 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 12733 for { 12734 off1 := v.AuxInt 12735 sym := v.Aux 12736 _ = v.Args[2] 12737 v_0 := v.Args[0] 12738 if v_0.Op != OpAMD64ADDQconst { 12739 break 12740 } 12741 off2 := v_0.AuxInt 12742 ptr := v_0.Args[0] 12743 val := v.Args[1] 12744 mem := v.Args[2] 12745 if !(is32Bit(off1 + off2)) { 12746 break 12747 } 12748 v.reset(OpAMD64MOVWstore) 12749 v.AuxInt = off1 + off2 12750 v.Aux = sym 12751 v.AddArg(ptr) 12752 v.AddArg(val) 12753 v.AddArg(mem) 12754 return 
true 12755 } 12756 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 12757 // cond: validOff(off) 12758 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 12759 for { 12760 off := v.AuxInt 12761 sym := v.Aux 12762 _ = v.Args[2] 12763 ptr := v.Args[0] 12764 v_1 := v.Args[1] 12765 if v_1.Op != OpAMD64MOVLconst { 12766 break 12767 } 12768 c := v_1.AuxInt 12769 mem := v.Args[2] 12770 if !(validOff(off)) { 12771 break 12772 } 12773 v.reset(OpAMD64MOVWstoreconst) 12774 v.AuxInt = makeValAndOff(int64(int16(c)), off) 12775 v.Aux = sym 12776 v.AddArg(ptr) 12777 v.AddArg(mem) 12778 return true 12779 } 12780 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 12781 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12782 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 12783 for { 12784 off1 := v.AuxInt 12785 sym1 := v.Aux 12786 _ = v.Args[2] 12787 v_0 := v.Args[0] 12788 if v_0.Op != OpAMD64LEAQ { 12789 break 12790 } 12791 off2 := v_0.AuxInt 12792 sym2 := v_0.Aux 12793 base := v_0.Args[0] 12794 val := v.Args[1] 12795 mem := v.Args[2] 12796 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12797 break 12798 } 12799 v.reset(OpAMD64MOVWstore) 12800 v.AuxInt = off1 + off2 12801 v.Aux = mergeSym(sym1, sym2) 12802 v.AddArg(base) 12803 v.AddArg(val) 12804 v.AddArg(mem) 12805 return true 12806 } 12807 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 12808 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12809 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 12810 for { 12811 off1 := v.AuxInt 12812 sym1 := v.Aux 12813 _ = v.Args[2] 12814 v_0 := v.Args[0] 12815 if v_0.Op != OpAMD64LEAQ1 { 12816 break 12817 } 12818 off2 := v_0.AuxInt 12819 sym2 := v_0.Aux 12820 _ = v_0.Args[1] 12821 ptr := v_0.Args[0] 12822 idx := v_0.Args[1] 12823 val := v.Args[1] 12824 mem := v.Args[2] 12825 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12826 break 12827 } 12828 v.reset(OpAMD64MOVWstoreidx1) 12829 v.AuxInt = off1 + off2 12830 v.Aux = mergeSym(sym1, sym2) 12831 v.AddArg(ptr) 12832 v.AddArg(idx) 12833 v.AddArg(val) 12834 v.AddArg(mem) 12835 return true 12836 } 12837 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 12838 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12839 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 12840 for { 12841 off1 := v.AuxInt 12842 sym1 := v.Aux 12843 _ = v.Args[2] 12844 v_0 := v.Args[0] 12845 if v_0.Op != OpAMD64LEAQ2 { 12846 break 12847 } 12848 off2 := v_0.AuxInt 12849 sym2 := v_0.Aux 12850 _ = v_0.Args[1] 12851 ptr := v_0.Args[0] 12852 idx := v_0.Args[1] 12853 val := v.Args[1] 12854 mem := v.Args[2] 12855 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12856 break 12857 } 12858 v.reset(OpAMD64MOVWstoreidx2) 12859 v.AuxInt = off1 + off2 12860 v.Aux = mergeSym(sym1, sym2) 12861 v.AddArg(ptr) 12862 v.AddArg(idx) 12863 v.AddArg(val) 12864 v.AddArg(mem) 12865 return true 12866 } 12867 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 12868 // cond: ptr.Op != OpSB 12869 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 12870 for { 12871 off := v.AuxInt 12872 sym := v.Aux 12873 _ = v.Args[2] 12874 v_0 := v.Args[0] 12875 if v_0.Op != OpAMD64ADDQ { 12876 break 12877 } 12878 _ = v_0.Args[1] 12879 ptr := v_0.Args[0] 12880 idx := v_0.Args[1] 12881 val := v.Args[1] 12882 mem := v.Args[2] 12883 if !(ptr.Op != OpSB) { 12884 break 12885 } 12886 v.reset(OpAMD64MOVWstoreidx1) 12887 
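// The statements below rebuild the store in indexed form: the (ADDQ ptr idx)
// address is decomposed so ptr and idx become separate operands of
// MOVWstoreidx1, with the displacement off and symbol sym carried over
// unchanged.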
v.AuxInt = off 12888 v.Aux = sym 12889 v.AddArg(ptr) 12890 v.AddArg(idx) 12891 v.AddArg(val) 12892 v.AddArg(mem) 12893 return true 12894 } 12895 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 12896 // cond: x.Uses == 1 && clobber(x) 12897 // result: (MOVLstore [i-2] {s} p w mem) 12898 for { 12899 i := v.AuxInt 12900 s := v.Aux 12901 _ = v.Args[2] 12902 p := v.Args[0] 12903 v_1 := v.Args[1] 12904 if v_1.Op != OpAMD64SHRQconst { 12905 break 12906 } 12907 if v_1.AuxInt != 16 { 12908 break 12909 } 12910 w := v_1.Args[0] 12911 x := v.Args[2] 12912 if x.Op != OpAMD64MOVWstore { 12913 break 12914 } 12915 if x.AuxInt != i-2 { 12916 break 12917 } 12918 if x.Aux != s { 12919 break 12920 } 12921 _ = x.Args[2] 12922 if p != x.Args[0] { 12923 break 12924 } 12925 if w != x.Args[1] { 12926 break 12927 } 12928 mem := x.Args[2] 12929 if !(x.Uses == 1 && clobber(x)) { 12930 break 12931 } 12932 v.reset(OpAMD64MOVLstore) 12933 v.AuxInt = i - 2 12934 v.Aux = s 12935 v.AddArg(p) 12936 v.AddArg(w) 12937 v.AddArg(mem) 12938 return true 12939 } 12940 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 12941 // cond: x.Uses == 1 && clobber(x) 12942 // result: (MOVLstore [i-2] {s} p w0 mem) 12943 for { 12944 i := v.AuxInt 12945 s := v.Aux 12946 _ = v.Args[2] 12947 p := v.Args[0] 12948 v_1 := v.Args[1] 12949 if v_1.Op != OpAMD64SHRQconst { 12950 break 12951 } 12952 j := v_1.AuxInt 12953 w := v_1.Args[0] 12954 x := v.Args[2] 12955 if x.Op != OpAMD64MOVWstore { 12956 break 12957 } 12958 if x.AuxInt != i-2 { 12959 break 12960 } 12961 if x.Aux != s { 12962 break 12963 } 12964 _ = x.Args[2] 12965 if p != x.Args[0] { 12966 break 12967 } 12968 w0 := x.Args[1] 12969 if w0.Op != OpAMD64SHRQconst { 12970 break 12971 } 12972 if w0.AuxInt != j-16 { 12973 break 12974 } 12975 if w != w0.Args[0] { 12976 break 12977 } 12978 mem := x.Args[2] 12979 if !(x.Uses == 1 && clobber(x)) { 12980 break 12981 } 12982 v.reset(OpAMD64MOVLstore) 12983 v.AuxInt = i - 2 12984 v.Aux = s 12985 v.AddArg(p) 12986 v.AddArg(w0) 12987 v.AddArg(mem) 12988 return true 12989 } 12990 return false 12991 } 12992 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 12993 b := v.Block 12994 _ = b 12995 typ := &b.Func.Config.Types 12996 _ = typ 12997 // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) 12998 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 12999 // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) 13000 for { 13001 i := v.AuxInt 13002 s := v.Aux 13003 _ = v.Args[2] 13004 p := v.Args[0] 13005 x1 := v.Args[1] 13006 if x1.Op != OpAMD64MOVWload { 13007 break 13008 } 13009 j := x1.AuxInt 13010 s2 := x1.Aux 13011 _ = x1.Args[1] 13012 p2 := x1.Args[0] 13013 mem := x1.Args[1] 13014 mem2 := v.Args[2] 13015 if mem2.Op != OpAMD64MOVWstore { 13016 break 13017 } 13018 if mem2.AuxInt != i-2 { 13019 break 13020 } 13021 if mem2.Aux != s { 13022 break 13023 } 13024 _ = mem2.Args[2] 13025 if p != mem2.Args[0] { 13026 break 13027 } 13028 x2 := mem2.Args[1] 13029 if x2.Op != OpAMD64MOVWload { 13030 break 13031 } 13032 if x2.AuxInt != j-2 { 13033 break 13034 } 13035 if x2.Aux != s2 { 13036 break 13037 } 13038 _ = x2.Args[1] 13039 if p2 != x2.Args[0] { 13040 break 13041 } 13042 if mem != x2.Args[1] { 13043 break 13044 } 13045 if mem != mem2.Args[2] { 13046 break 13047 } 13048 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && 
clobber(x1) && clobber(x2) && clobber(mem2)) { 13049 break 13050 } 13051 v.reset(OpAMD64MOVLstore) 13052 v.AuxInt = i - 2 13053 v.Aux = s 13054 v.AddArg(p) 13055 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 13056 v0.AuxInt = j - 2 13057 v0.Aux = s2 13058 v0.AddArg(p2) 13059 v0.AddArg(mem) 13060 v.AddArg(v0) 13061 v.AddArg(mem) 13062 return true 13063 } 13064 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 13065 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 13066 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13067 for { 13068 off1 := v.AuxInt 13069 sym1 := v.Aux 13070 _ = v.Args[2] 13071 v_0 := v.Args[0] 13072 if v_0.Op != OpAMD64LEAL { 13073 break 13074 } 13075 off2 := v_0.AuxInt 13076 sym2 := v_0.Aux 13077 base := v_0.Args[0] 13078 val := v.Args[1] 13079 mem := v.Args[2] 13080 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 13081 break 13082 } 13083 v.reset(OpAMD64MOVWstore) 13084 v.AuxInt = off1 + off2 13085 v.Aux = mergeSym(sym1, sym2) 13086 v.AddArg(base) 13087 v.AddArg(val) 13088 v.AddArg(mem) 13089 return true 13090 } 13091 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 13092 // cond: is32Bit(off1+off2) 13093 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 13094 for { 13095 off1 := v.AuxInt 13096 sym := v.Aux 13097 _ = v.Args[2] 13098 v_0 := v.Args[0] 13099 if v_0.Op != OpAMD64ADDLconst { 13100 break 13101 } 13102 off2 := v_0.AuxInt 13103 ptr := v_0.Args[0] 13104 val := v.Args[1] 13105 mem := v.Args[2] 13106 if !(is32Bit(off1 + off2)) { 13107 break 13108 } 13109 v.reset(OpAMD64MOVWstore) 13110 v.AuxInt = off1 + off2 13111 v.Aux = sym 13112 v.AddArg(ptr) 13113 v.AddArg(val) 13114 v.AddArg(mem) 13115 return true 13116 } 13117 return false 13118 } 13119 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { 13120 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 13121 // cond: ValAndOff(sc).canAdd(off) 13122 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 13123 for { 13124 sc := v.AuxInt 13125 s := v.Aux 13126 _ = v.Args[1] 13127 v_0 := v.Args[0] 13128 if v_0.Op != OpAMD64ADDQconst { 13129 break 13130 } 13131 off := v_0.AuxInt 13132 ptr := v_0.Args[0] 13133 mem := v.Args[1] 13134 if !(ValAndOff(sc).canAdd(off)) { 13135 break 13136 } 13137 v.reset(OpAMD64MOVWstoreconst) 13138 v.AuxInt = ValAndOff(sc).add(off) 13139 v.Aux = s 13140 v.AddArg(ptr) 13141 v.AddArg(mem) 13142 return true 13143 } 13144 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 13145 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13146 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13147 for { 13148 sc := v.AuxInt 13149 sym1 := v.Aux 13150 _ = v.Args[1] 13151 v_0 := v.Args[0] 13152 if v_0.Op != OpAMD64LEAQ { 13153 break 13154 } 13155 off := v_0.AuxInt 13156 sym2 := v_0.Aux 13157 ptr := v_0.Args[0] 13158 mem := v.Args[1] 13159 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13160 break 13161 } 13162 v.reset(OpAMD64MOVWstoreconst) 13163 v.AuxInt = ValAndOff(sc).add(off) 13164 v.Aux = mergeSym(sym1, sym2) 13165 v.AddArg(ptr) 13166 v.AddArg(mem) 13167 return true 13168 } 13169 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 13170 // cond: canMergeSym(sym1, sym2) 13171 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13172 for { 13173 x := v.AuxInt 13174 sym1 := v.Aux 13175 _ = v.Args[1] 13176 v_0 := v.Args[0] 13177 if v_0.Op != OpAMD64LEAQ1 { 13178 
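// The address is not a (LEAQ1 ...), so this constant store cannot be
// converted to its indexed form here; fall through to the next rule.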
break 13179 } 13180 off := v_0.AuxInt 13181 sym2 := v_0.Aux 13182 _ = v_0.Args[1] 13183 ptr := v_0.Args[0] 13184 idx := v_0.Args[1] 13185 mem := v.Args[1] 13186 if !(canMergeSym(sym1, sym2)) { 13187 break 13188 } 13189 v.reset(OpAMD64MOVWstoreconstidx1) 13190 v.AuxInt = ValAndOff(x).add(off) 13191 v.Aux = mergeSym(sym1, sym2) 13192 v.AddArg(ptr) 13193 v.AddArg(idx) 13194 v.AddArg(mem) 13195 return true 13196 } 13197 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 13198 // cond: canMergeSym(sym1, sym2) 13199 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13200 for { 13201 x := v.AuxInt 13202 sym1 := v.Aux 13203 _ = v.Args[1] 13204 v_0 := v.Args[0] 13205 if v_0.Op != OpAMD64LEAQ2 { 13206 break 13207 } 13208 off := v_0.AuxInt 13209 sym2 := v_0.Aux 13210 _ = v_0.Args[1] 13211 ptr := v_0.Args[0] 13212 idx := v_0.Args[1] 13213 mem := v.Args[1] 13214 if !(canMergeSym(sym1, sym2)) { 13215 break 13216 } 13217 v.reset(OpAMD64MOVWstoreconstidx2) 13218 v.AuxInt = ValAndOff(x).add(off) 13219 v.Aux = mergeSym(sym1, sym2) 13220 v.AddArg(ptr) 13221 v.AddArg(idx) 13222 v.AddArg(mem) 13223 return true 13224 } 13225 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 13226 // cond: 13227 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 13228 for { 13229 x := v.AuxInt 13230 sym := v.Aux 13231 _ = v.Args[1] 13232 v_0 := v.Args[0] 13233 if v_0.Op != OpAMD64ADDQ { 13234 break 13235 } 13236 _ = v_0.Args[1] 13237 ptr := v_0.Args[0] 13238 idx := v_0.Args[1] 13239 mem := v.Args[1] 13240 v.reset(OpAMD64MOVWstoreconstidx1) 13241 v.AuxInt = x 13242 v.Aux = sym 13243 v.AddArg(ptr) 13244 v.AddArg(idx) 13245 v.AddArg(mem) 13246 return true 13247 } 13248 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 13249 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13250 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 13251 for { 13252 c := v.AuxInt 13253 s := v.Aux 13254 _ = v.Args[1] 13255 p := v.Args[0] 13256 x := v.Args[1] 13257 if x.Op != OpAMD64MOVWstoreconst { 13258 break 13259 } 13260 a := x.AuxInt 13261 if x.Aux != s { 13262 break 13263 } 13264 _ = x.Args[1] 13265 if p != x.Args[0] { 13266 break 13267 } 13268 mem := x.Args[1] 13269 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13270 break 13271 } 13272 v.reset(OpAMD64MOVLstoreconst) 13273 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13274 v.Aux = s 13275 v.AddArg(p) 13276 v.AddArg(mem) 13277 return true 13278 } 13279 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 13280 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13281 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13282 for { 13283 sc := v.AuxInt 13284 sym1 := v.Aux 13285 _ = v.Args[1] 13286 v_0 := v.Args[0] 13287 if v_0.Op != OpAMD64LEAL { 13288 break 13289 } 13290 off := v_0.AuxInt 13291 sym2 := v_0.Aux 13292 ptr := v_0.Args[0] 13293 mem := v.Args[1] 13294 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13295 break 13296 } 13297 v.reset(OpAMD64MOVWstoreconst) 13298 v.AuxInt = ValAndOff(sc).add(off) 13299 v.Aux = mergeSym(sym1, sym2) 13300 v.AddArg(ptr) 13301 v.AddArg(mem) 13302 return true 13303 } 13304 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 13305 // cond: ValAndOff(sc).canAdd(off) 13306 // result: (MOVWstoreconst 
[ValAndOff(sc).add(off)] {s} ptr mem) 13307 for { 13308 sc := v.AuxInt 13309 s := v.Aux 13310 _ = v.Args[1] 13311 v_0 := v.Args[0] 13312 if v_0.Op != OpAMD64ADDLconst { 13313 break 13314 } 13315 off := v_0.AuxInt 13316 ptr := v_0.Args[0] 13317 mem := v.Args[1] 13318 if !(ValAndOff(sc).canAdd(off)) { 13319 break 13320 } 13321 v.reset(OpAMD64MOVWstoreconst) 13322 v.AuxInt = ValAndOff(sc).add(off) 13323 v.Aux = s 13324 v.AddArg(ptr) 13325 v.AddArg(mem) 13326 return true 13327 } 13328 return false 13329 } 13330 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { 13331 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 13332 // cond: 13333 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 13334 for { 13335 c := v.AuxInt 13336 sym := v.Aux 13337 _ = v.Args[2] 13338 ptr := v.Args[0] 13339 v_1 := v.Args[1] 13340 if v_1.Op != OpAMD64SHLQconst { 13341 break 13342 } 13343 if v_1.AuxInt != 1 { 13344 break 13345 } 13346 idx := v_1.Args[0] 13347 mem := v.Args[2] 13348 v.reset(OpAMD64MOVWstoreconstidx2) 13349 v.AuxInt = c 13350 v.Aux = sym 13351 v.AddArg(ptr) 13352 v.AddArg(idx) 13353 v.AddArg(mem) 13354 return true 13355 } 13356 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 13357 // cond: ValAndOff(x).canAdd(c) 13358 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13359 for { 13360 x := v.AuxInt 13361 sym := v.Aux 13362 _ = v.Args[2] 13363 v_0 := v.Args[0] 13364 if v_0.Op != OpAMD64ADDQconst { 13365 break 13366 } 13367 c := v_0.AuxInt 13368 ptr := v_0.Args[0] 13369 idx := v.Args[1] 13370 mem := v.Args[2] 13371 if !(ValAndOff(x).canAdd(c)) { 13372 break 13373 } 13374 v.reset(OpAMD64MOVWstoreconstidx1) 13375 v.AuxInt = ValAndOff(x).add(c) 13376 v.Aux = sym 13377 v.AddArg(ptr) 13378 v.AddArg(idx) 13379 v.AddArg(mem) 13380 return true 13381 } 13382 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 13383 // cond: ValAndOff(x).canAdd(c) 13384 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13385 for { 13386 x := v.AuxInt 13387 sym := v.Aux 13388 _ = v.Args[2] 13389 ptr := v.Args[0] 13390 v_1 := v.Args[1] 13391 if v_1.Op != OpAMD64ADDQconst { 13392 break 13393 } 13394 c := v_1.AuxInt 13395 idx := v_1.Args[0] 13396 mem := v.Args[2] 13397 if !(ValAndOff(x).canAdd(c)) { 13398 break 13399 } 13400 v.reset(OpAMD64MOVWstoreconstidx1) 13401 v.AuxInt = ValAndOff(x).add(c) 13402 v.Aux = sym 13403 v.AddArg(ptr) 13404 v.AddArg(idx) 13405 v.AddArg(mem) 13406 return true 13407 } 13408 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 13409 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13410 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 13411 for { 13412 c := v.AuxInt 13413 s := v.Aux 13414 _ = v.Args[2] 13415 p := v.Args[0] 13416 i := v.Args[1] 13417 x := v.Args[2] 13418 if x.Op != OpAMD64MOVWstoreconstidx1 { 13419 break 13420 } 13421 a := x.AuxInt 13422 if x.Aux != s { 13423 break 13424 } 13425 _ = x.Args[2] 13426 if p != x.Args[0] { 13427 break 13428 } 13429 if i != x.Args[1] { 13430 break 13431 } 13432 mem := x.Args[2] 13433 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13434 break 13435 } 13436 v.reset(OpAMD64MOVLstoreconstidx1) 13437 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13438 v.Aux = s 13439 v.AddArg(p) 13440 v.AddArg(i) 13441 v.AddArg(mem) 13442 
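// Both 16-bit constant stores have now been fused into one 32-bit
// MOVLstoreconstidx1: the value stored at the lower offset supplies the low
// half and the one two bytes above supplies the high half. Illustratively,
// 0x1234 at off and 0x5678 at off+2 become 0x56781234 at off, which stores
// the same bytes on little-endian AMD64.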
return true 13443 } 13444 return false 13445 } 13446 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { 13447 b := v.Block 13448 _ = b 13449 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 13450 // cond: ValAndOff(x).canAdd(c) 13451 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13452 for { 13453 x := v.AuxInt 13454 sym := v.Aux 13455 _ = v.Args[2] 13456 v_0 := v.Args[0] 13457 if v_0.Op != OpAMD64ADDQconst { 13458 break 13459 } 13460 c := v_0.AuxInt 13461 ptr := v_0.Args[0] 13462 idx := v.Args[1] 13463 mem := v.Args[2] 13464 if !(ValAndOff(x).canAdd(c)) { 13465 break 13466 } 13467 v.reset(OpAMD64MOVWstoreconstidx2) 13468 v.AuxInt = ValAndOff(x).add(c) 13469 v.Aux = sym 13470 v.AddArg(ptr) 13471 v.AddArg(idx) 13472 v.AddArg(mem) 13473 return true 13474 } 13475 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 13476 // cond: ValAndOff(x).canAdd(2*c) 13477 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 13478 for { 13479 x := v.AuxInt 13480 sym := v.Aux 13481 _ = v.Args[2] 13482 ptr := v.Args[0] 13483 v_1 := v.Args[1] 13484 if v_1.Op != OpAMD64ADDQconst { 13485 break 13486 } 13487 c := v_1.AuxInt 13488 idx := v_1.Args[0] 13489 mem := v.Args[2] 13490 if !(ValAndOff(x).canAdd(2 * c)) { 13491 break 13492 } 13493 v.reset(OpAMD64MOVWstoreconstidx2) 13494 v.AuxInt = ValAndOff(x).add(2 * c) 13495 v.Aux = sym 13496 v.AddArg(ptr) 13497 v.AddArg(idx) 13498 v.AddArg(mem) 13499 return true 13500 } 13501 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 13502 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13503 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 13504 for { 13505 c := v.AuxInt 13506 s := v.Aux 13507 _ = v.Args[2] 13508 p := v.Args[0] 13509 i := v.Args[1] 13510 x := v.Args[2] 13511 if x.Op != OpAMD64MOVWstoreconstidx2 { 13512 break 13513 } 13514 a := x.AuxInt 13515 if x.Aux != s { 13516 break 13517 } 13518 _ = x.Args[2] 13519 if p != x.Args[0] { 13520 break 13521 } 13522 if i != x.Args[1] { 13523 break 13524 } 13525 mem := x.Args[2] 13526 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13527 break 13528 } 13529 v.reset(OpAMD64MOVLstoreconstidx1) 13530 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13531 v.Aux = s 13532 v.AddArg(p) 13533 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 13534 v0.AuxInt = 1 13535 v0.AddArg(i) 13536 v.AddArg(v0) 13537 v.AddArg(mem) 13538 return true 13539 } 13540 return false 13541 } 13542 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { 13543 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 13544 // cond: 13545 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 13546 for { 13547 c := v.AuxInt 13548 sym := v.Aux 13549 _ = v.Args[3] 13550 ptr := v.Args[0] 13551 v_1 := v.Args[1] 13552 if v_1.Op != OpAMD64SHLQconst { 13553 break 13554 } 13555 if v_1.AuxInt != 1 { 13556 break 13557 } 13558 idx := v_1.Args[0] 13559 val := v.Args[2] 13560 mem := v.Args[3] 13561 v.reset(OpAMD64MOVWstoreidx2) 13562 v.AuxInt = c 13563 v.Aux = sym 13564 v.AddArg(ptr) 13565 v.AddArg(idx) 13566 v.AddArg(val) 13567 v.AddArg(mem) 13568 return true 13569 } 13570 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 13571 // cond: is32Bit(c+d) 13572 // result: (MOVWstoreidx1 [c+d] {sym} 
ptr idx val mem) 13573 for { 13574 c := v.AuxInt 13575 sym := v.Aux 13576 _ = v.Args[3] 13577 v_0 := v.Args[0] 13578 if v_0.Op != OpAMD64ADDQconst { 13579 break 13580 } 13581 d := v_0.AuxInt 13582 ptr := v_0.Args[0] 13583 idx := v.Args[1] 13584 val := v.Args[2] 13585 mem := v.Args[3] 13586 if !(is32Bit(c + d)) { 13587 break 13588 } 13589 v.reset(OpAMD64MOVWstoreidx1) 13590 v.AuxInt = c + d 13591 v.Aux = sym 13592 v.AddArg(ptr) 13593 v.AddArg(idx) 13594 v.AddArg(val) 13595 v.AddArg(mem) 13596 return true 13597 } 13598 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 13599 // cond: is32Bit(c+d) 13600 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 13601 for { 13602 c := v.AuxInt 13603 sym := v.Aux 13604 _ = v.Args[3] 13605 ptr := v.Args[0] 13606 v_1 := v.Args[1] 13607 if v_1.Op != OpAMD64ADDQconst { 13608 break 13609 } 13610 d := v_1.AuxInt 13611 idx := v_1.Args[0] 13612 val := v.Args[2] 13613 mem := v.Args[3] 13614 if !(is32Bit(c + d)) { 13615 break 13616 } 13617 v.reset(OpAMD64MOVWstoreidx1) 13618 v.AuxInt = c + d 13619 v.Aux = sym 13620 v.AddArg(ptr) 13621 v.AddArg(idx) 13622 v.AddArg(val) 13623 v.AddArg(mem) 13624 return true 13625 } 13626 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 13627 // cond: x.Uses == 1 && clobber(x) 13628 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 13629 for { 13630 i := v.AuxInt 13631 s := v.Aux 13632 _ = v.Args[3] 13633 p := v.Args[0] 13634 idx := v.Args[1] 13635 v_2 := v.Args[2] 13636 if v_2.Op != OpAMD64SHRQconst { 13637 break 13638 } 13639 if v_2.AuxInt != 16 { 13640 break 13641 } 13642 w := v_2.Args[0] 13643 x := v.Args[3] 13644 if x.Op != OpAMD64MOVWstoreidx1 { 13645 break 13646 } 13647 if x.AuxInt != i-2 { 13648 break 13649 } 13650 if x.Aux != s { 13651 break 13652 } 13653 _ = x.Args[3] 13654 if p != x.Args[0] { 13655 break 13656 } 13657 if idx != x.Args[1] { 13658 break 13659 } 13660 if w != x.Args[2] { 13661 break 13662 } 13663 mem := x.Args[3] 13664 if !(x.Uses == 1 && clobber(x)) { 13665 break 13666 } 13667 v.reset(OpAMD64MOVLstoreidx1) 13668 v.AuxInt = i - 2 13669 v.Aux = s 13670 v.AddArg(p) 13671 v.AddArg(idx) 13672 v.AddArg(w) 13673 v.AddArg(mem) 13674 return true 13675 } 13676 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 13677 // cond: x.Uses == 1 && clobber(x) 13678 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 13679 for { 13680 i := v.AuxInt 13681 s := v.Aux 13682 _ = v.Args[3] 13683 p := v.Args[0] 13684 idx := v.Args[1] 13685 v_2 := v.Args[2] 13686 if v_2.Op != OpAMD64SHRQconst { 13687 break 13688 } 13689 j := v_2.AuxInt 13690 w := v_2.Args[0] 13691 x := v.Args[3] 13692 if x.Op != OpAMD64MOVWstoreidx1 { 13693 break 13694 } 13695 if x.AuxInt != i-2 { 13696 break 13697 } 13698 if x.Aux != s { 13699 break 13700 } 13701 _ = x.Args[3] 13702 if p != x.Args[0] { 13703 break 13704 } 13705 if idx != x.Args[1] { 13706 break 13707 } 13708 w0 := x.Args[2] 13709 if w0.Op != OpAMD64SHRQconst { 13710 break 13711 } 13712 if w0.AuxInt != j-16 { 13713 break 13714 } 13715 if w != w0.Args[0] { 13716 break 13717 } 13718 mem := x.Args[3] 13719 if !(x.Uses == 1 && clobber(x)) { 13720 break 13721 } 13722 v.reset(OpAMD64MOVLstoreidx1) 13723 v.AuxInt = i - 2 13724 v.Aux = s 13725 v.AddArg(p) 13726 v.AddArg(idx) 13727 v.AddArg(w0) 13728 v.AddArg(mem) 13729 return true 13730 } 13731 return false 13732 } 13733 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { 13734 b := v.Block 13735 _ = b 13736 // 
match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 13737 // cond: is32Bit(c+d) 13738 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 13739 for { 13740 c := v.AuxInt 13741 sym := v.Aux 13742 _ = v.Args[3] 13743 v_0 := v.Args[0] 13744 if v_0.Op != OpAMD64ADDQconst { 13745 break 13746 } 13747 d := v_0.AuxInt 13748 ptr := v_0.Args[0] 13749 idx := v.Args[1] 13750 val := v.Args[2] 13751 mem := v.Args[3] 13752 if !(is32Bit(c + d)) { 13753 break 13754 } 13755 v.reset(OpAMD64MOVWstoreidx2) 13756 v.AuxInt = c + d 13757 v.Aux = sym 13758 v.AddArg(ptr) 13759 v.AddArg(idx) 13760 v.AddArg(val) 13761 v.AddArg(mem) 13762 return true 13763 } 13764 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 13765 // cond: is32Bit(c+2*d) 13766 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 13767 for { 13768 c := v.AuxInt 13769 sym := v.Aux 13770 _ = v.Args[3] 13771 ptr := v.Args[0] 13772 v_1 := v.Args[1] 13773 if v_1.Op != OpAMD64ADDQconst { 13774 break 13775 } 13776 d := v_1.AuxInt 13777 idx := v_1.Args[0] 13778 val := v.Args[2] 13779 mem := v.Args[3] 13780 if !(is32Bit(c + 2*d)) { 13781 break 13782 } 13783 v.reset(OpAMD64MOVWstoreidx2) 13784 v.AuxInt = c + 2*d 13785 v.Aux = sym 13786 v.AddArg(ptr) 13787 v.AddArg(idx) 13788 v.AddArg(val) 13789 v.AddArg(mem) 13790 return true 13791 } 13792 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 13793 // cond: x.Uses == 1 && clobber(x) 13794 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 13795 for { 13796 i := v.AuxInt 13797 s := v.Aux 13798 _ = v.Args[3] 13799 p := v.Args[0] 13800 idx := v.Args[1] 13801 v_2 := v.Args[2] 13802 if v_2.Op != OpAMD64SHRQconst { 13803 break 13804 } 13805 if v_2.AuxInt != 16 { 13806 break 13807 } 13808 w := v_2.Args[0] 13809 x := v.Args[3] 13810 if x.Op != OpAMD64MOVWstoreidx2 { 13811 break 13812 } 13813 if x.AuxInt != i-2 { 13814 break 13815 } 13816 if x.Aux != s { 13817 break 13818 } 13819 _ = x.Args[3] 13820 if p != x.Args[0] { 13821 break 13822 } 13823 if idx != x.Args[1] { 13824 break 13825 } 13826 if w != x.Args[2] { 13827 break 13828 } 13829 mem := x.Args[3] 13830 if !(x.Uses == 1 && clobber(x)) { 13831 break 13832 } 13833 v.reset(OpAMD64MOVLstoreidx1) 13834 v.AuxInt = i - 2 13835 v.Aux = s 13836 v.AddArg(p) 13837 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 13838 v0.AuxInt = 1 13839 v0.AddArg(idx) 13840 v.AddArg(v0) 13841 v.AddArg(w) 13842 v.AddArg(mem) 13843 return true 13844 } 13845 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 13846 // cond: x.Uses == 1 && clobber(x) 13847 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 13848 for { 13849 i := v.AuxInt 13850 s := v.Aux 13851 _ = v.Args[3] 13852 p := v.Args[0] 13853 idx := v.Args[1] 13854 v_2 := v.Args[2] 13855 if v_2.Op != OpAMD64SHRQconst { 13856 break 13857 } 13858 j := v_2.AuxInt 13859 w := v_2.Args[0] 13860 x := v.Args[3] 13861 if x.Op != OpAMD64MOVWstoreidx2 { 13862 break 13863 } 13864 if x.AuxInt != i-2 { 13865 break 13866 } 13867 if x.Aux != s { 13868 break 13869 } 13870 _ = x.Args[3] 13871 if p != x.Args[0] { 13872 break 13873 } 13874 if idx != x.Args[1] { 13875 break 13876 } 13877 w0 := x.Args[2] 13878 if w0.Op != OpAMD64SHRQconst { 13879 break 13880 } 13881 if w0.AuxInt != j-16 { 13882 break 13883 } 13884 if w != w0.Args[0] { 13885 break 13886 } 13887 mem := x.Args[3] 13888 if !(x.Uses == 1 && clobber(x)) { 13889 break 13890 } 13891 
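// The guard passed: x has no other uses and may be clobbered, so the two
// adjacent 16-bit stores of w>>j and w>>(j-16) collapse into a single
// 32-bit store of w0. MOVLstoreidx1 has no scale-2 addressing mode, so idx
// is rebuilt below as (SHLQconst [1] idx) to preserve the byte address.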
v.reset(OpAMD64MOVLstoreidx1) 13892 v.AuxInt = i - 2 13893 v.Aux = s 13894 v.AddArg(p) 13895 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 13896 v0.AuxInt = 1 13897 v0.AddArg(idx) 13898 v.AddArg(v0) 13899 v.AddArg(w0) 13900 v.AddArg(mem) 13901 return true 13902 } 13903 return false 13904 } 13905 func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { 13906 // match: (MULL x (MOVLconst [c])) 13907 // cond: 13908 // result: (MULLconst [c] x) 13909 for { 13910 _ = v.Args[1] 13911 x := v.Args[0] 13912 v_1 := v.Args[1] 13913 if v_1.Op != OpAMD64MOVLconst { 13914 break 13915 } 13916 c := v_1.AuxInt 13917 v.reset(OpAMD64MULLconst) 13918 v.AuxInt = c 13919 v.AddArg(x) 13920 return true 13921 } 13922 // match: (MULL (MOVLconst [c]) x) 13923 // cond: 13924 // result: (MULLconst [c] x) 13925 for { 13926 _ = v.Args[1] 13927 v_0 := v.Args[0] 13928 if v_0.Op != OpAMD64MOVLconst { 13929 break 13930 } 13931 c := v_0.AuxInt 13932 x := v.Args[1] 13933 v.reset(OpAMD64MULLconst) 13934 v.AuxInt = c 13935 v.AddArg(x) 13936 return true 13937 } 13938 return false 13939 } 13940 func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { 13941 // match: (MULLconst [c] (MULLconst [d] x)) 13942 // cond: 13943 // result: (MULLconst [int64(int32(c * d))] x) 13944 for { 13945 c := v.AuxInt 13946 v_0 := v.Args[0] 13947 if v_0.Op != OpAMD64MULLconst { 13948 break 13949 } 13950 d := v_0.AuxInt 13951 x := v_0.Args[0] 13952 v.reset(OpAMD64MULLconst) 13953 v.AuxInt = int64(int32(c * d)) 13954 v.AddArg(x) 13955 return true 13956 } 13957 // match: (MULLconst [c] (MOVLconst [d])) 13958 // cond: 13959 // result: (MOVLconst [int64(int32(c*d))]) 13960 for { 13961 c := v.AuxInt 13962 v_0 := v.Args[0] 13963 if v_0.Op != OpAMD64MOVLconst { 13964 break 13965 } 13966 d := v_0.AuxInt 13967 v.reset(OpAMD64MOVLconst) 13968 v.AuxInt = int64(int32(c * d)) 13969 return true 13970 } 13971 return false 13972 } 13973 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { 13974 // match: (MULQ x (MOVQconst [c])) 13975 // cond: is32Bit(c) 13976 // result: (MULQconst [c] x) 13977 for { 13978 _ = v.Args[1] 13979 x := v.Args[0] 13980 v_1 := v.Args[1] 13981 if v_1.Op != OpAMD64MOVQconst { 13982 break 13983 } 13984 c := v_1.AuxInt 13985 if !(is32Bit(c)) { 13986 break 13987 } 13988 v.reset(OpAMD64MULQconst) 13989 v.AuxInt = c 13990 v.AddArg(x) 13991 return true 13992 } 13993 // match: (MULQ (MOVQconst [c]) x) 13994 // cond: is32Bit(c) 13995 // result: (MULQconst [c] x) 13996 for { 13997 _ = v.Args[1] 13998 v_0 := v.Args[0] 13999 if v_0.Op != OpAMD64MOVQconst { 14000 break 14001 } 14002 c := v_0.AuxInt 14003 x := v.Args[1] 14004 if !(is32Bit(c)) { 14005 break 14006 } 14007 v.reset(OpAMD64MULQconst) 14008 v.AuxInt = c 14009 v.AddArg(x) 14010 return true 14011 } 14012 return false 14013 } 14014 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { 14015 b := v.Block 14016 _ = b 14017 // match: (MULQconst [c] (MULQconst [d] x)) 14018 // cond: is32Bit(c*d) 14019 // result: (MULQconst [c * d] x) 14020 for { 14021 c := v.AuxInt 14022 v_0 := v.Args[0] 14023 if v_0.Op != OpAMD64MULQconst { 14024 break 14025 } 14026 d := v_0.AuxInt 14027 x := v_0.Args[0] 14028 if !(is32Bit(c * d)) { 14029 break 14030 } 14031 v.reset(OpAMD64MULQconst) 14032 v.AuxInt = c * d 14033 v.AddArg(x) 14034 return true 14035 } 14036 // match: (MULQconst [-1] x) 14037 // cond: 14038 // result: (NEGQ x) 14039 for { 14040 if v.AuxInt != -1 { 14041 break 14042 } 14043 x := v.Args[0] 14044 v.reset(OpAMD64NEGQ) 14045 v.AddArg(x) 14046 return true 14047 } 14048 // match: (MULQconst [0] _) 
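// (Multiplying anything by 0 yields 0, so the multiplicand is irrelevant
// and is matched as _.)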
14049 // cond: 14050 // result: (MOVQconst [0]) 14051 for { 14052 if v.AuxInt != 0 { 14053 break 14054 } 14055 v.reset(OpAMD64MOVQconst) 14056 v.AuxInt = 0 14057 return true 14058 } 14059 // match: (MULQconst [1] x) 14060 // cond: 14061 // result: x 14062 for { 14063 if v.AuxInt != 1 { 14064 break 14065 } 14066 x := v.Args[0] 14067 v.reset(OpCopy) 14068 v.Type = x.Type 14069 v.AddArg(x) 14070 return true 14071 } 14072 // match: (MULQconst [3] x) 14073 // cond: 14074 // result: (LEAQ2 x x) 14075 for { 14076 if v.AuxInt != 3 { 14077 break 14078 } 14079 x := v.Args[0] 14080 v.reset(OpAMD64LEAQ2) 14081 v.AddArg(x) 14082 v.AddArg(x) 14083 return true 14084 } 14085 // match: (MULQconst [5] x) 14086 // cond: 14087 // result: (LEAQ4 x x) 14088 for { 14089 if v.AuxInt != 5 { 14090 break 14091 } 14092 x := v.Args[0] 14093 v.reset(OpAMD64LEAQ4) 14094 v.AddArg(x) 14095 v.AddArg(x) 14096 return true 14097 } 14098 // match: (MULQconst [7] x) 14099 // cond: 14100 // result: (LEAQ8 (NEGQ <v.Type> x) x) 14101 for { 14102 if v.AuxInt != 7 { 14103 break 14104 } 14105 x := v.Args[0] 14106 v.reset(OpAMD64LEAQ8) 14107 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) 14108 v0.AddArg(x) 14109 v.AddArg(v0) 14110 v.AddArg(x) 14111 return true 14112 } 14113 // match: (MULQconst [9] x) 14114 // cond: 14115 // result: (LEAQ8 x x) 14116 for { 14117 if v.AuxInt != 9 { 14118 break 14119 } 14120 x := v.Args[0] 14121 v.reset(OpAMD64LEAQ8) 14122 v.AddArg(x) 14123 v.AddArg(x) 14124 return true 14125 } 14126 // match: (MULQconst [11] x) 14127 // cond: 14128 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 14129 for { 14130 if v.AuxInt != 11 { 14131 break 14132 } 14133 x := v.Args[0] 14134 v.reset(OpAMD64LEAQ2) 14135 v.AddArg(x) 14136 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14137 v0.AddArg(x) 14138 v0.AddArg(x) 14139 v.AddArg(v0) 14140 return true 14141 } 14142 // match: (MULQconst [13] x) 14143 // cond: 14144 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 14145 for { 14146 if v.AuxInt != 13 { 14147 break 14148 } 14149 x := v.Args[0] 14150 v.reset(OpAMD64LEAQ4) 14151 v.AddArg(x) 14152 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14153 v0.AddArg(x) 14154 v0.AddArg(x) 14155 v.AddArg(v0) 14156 return true 14157 } 14158 return false 14159 } 14160 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { 14161 b := v.Block 14162 _ = b 14163 // match: (MULQconst [21] x) 14164 // cond: 14165 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 14166 for { 14167 if v.AuxInt != 21 { 14168 break 14169 } 14170 x := v.Args[0] 14171 v.reset(OpAMD64LEAQ4) 14172 v.AddArg(x) 14173 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14174 v0.AddArg(x) 14175 v0.AddArg(x) 14176 v.AddArg(v0) 14177 return true 14178 } 14179 // match: (MULQconst [25] x) 14180 // cond: 14181 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 14182 for { 14183 if v.AuxInt != 25 { 14184 break 14185 } 14186 x := v.Args[0] 14187 v.reset(OpAMD64LEAQ8) 14188 v.AddArg(x) 14189 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14190 v0.AddArg(x) 14191 v0.AddArg(x) 14192 v.AddArg(v0) 14193 return true 14194 } 14195 // match: (MULQconst [37] x) 14196 // cond: 14197 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 14198 for { 14199 if v.AuxInt != 37 { 14200 break 14201 } 14202 x := v.Args[0] 14203 v.reset(OpAMD64LEAQ4) 14204 v.AddArg(x) 14205 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14206 v0.AddArg(x) 14207 v0.AddArg(x) 14208 v.AddArg(v0) 14209 return true 14210 } 14211 // match: (MULQconst [41] x) 14212 // cond: 14213 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 14214 for { 14215 if v.AuxInt != 41 { 14216 break 
14217 } 14218 x := v.Args[0] 14219 v.reset(OpAMD64LEAQ8) 14220 v.AddArg(x) 14221 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14222 v0.AddArg(x) 14223 v0.AddArg(x) 14224 v.AddArg(v0) 14225 return true 14226 } 14227 // match: (MULQconst [73] x) 14228 // cond: 14229 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 14230 for { 14231 if v.AuxInt != 73 { 14232 break 14233 } 14234 x := v.Args[0] 14235 v.reset(OpAMD64LEAQ8) 14236 v.AddArg(x) 14237 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14238 v0.AddArg(x) 14239 v0.AddArg(x) 14240 v.AddArg(v0) 14241 return true 14242 } 14243 // match: (MULQconst [c] x) 14244 // cond: isPowerOfTwo(c) 14245 // result: (SHLQconst [log2(c)] x) 14246 for { 14247 c := v.AuxInt 14248 x := v.Args[0] 14249 if !(isPowerOfTwo(c)) { 14250 break 14251 } 14252 v.reset(OpAMD64SHLQconst) 14253 v.AuxInt = log2(c) 14254 v.AddArg(x) 14255 return true 14256 } 14257 // match: (MULQconst [c] x) 14258 // cond: isPowerOfTwo(c+1) && c >= 15 14259 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 14260 for { 14261 c := v.AuxInt 14262 x := v.Args[0] 14263 if !(isPowerOfTwo(c+1) && c >= 15) { 14264 break 14265 } 14266 v.reset(OpAMD64SUBQ) 14267 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14268 v0.AuxInt = log2(c + 1) 14269 v0.AddArg(x) 14270 v.AddArg(v0) 14271 v.AddArg(x) 14272 return true 14273 } 14274 // match: (MULQconst [c] x) 14275 // cond: isPowerOfTwo(c-1) && c >= 17 14276 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 14277 for { 14278 c := v.AuxInt 14279 x := v.Args[0] 14280 if !(isPowerOfTwo(c-1) && c >= 17) { 14281 break 14282 } 14283 v.reset(OpAMD64LEAQ1) 14284 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14285 v0.AuxInt = log2(c - 1) 14286 v0.AddArg(x) 14287 v.AddArg(v0) 14288 v.AddArg(x) 14289 return true 14290 } 14291 // match: (MULQconst [c] x) 14292 // cond: isPowerOfTwo(c-2) && c >= 34 14293 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 14294 for { 14295 c := v.AuxInt 14296 x := v.Args[0] 14297 if !(isPowerOfTwo(c-2) && c >= 34) { 14298 break 14299 } 14300 v.reset(OpAMD64LEAQ2) 14301 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14302 v0.AuxInt = log2(c - 2) 14303 v0.AddArg(x) 14304 v.AddArg(v0) 14305 v.AddArg(x) 14306 return true 14307 } 14308 // match: (MULQconst [c] x) 14309 // cond: isPowerOfTwo(c-4) && c >= 68 14310 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 14311 for { 14312 c := v.AuxInt 14313 x := v.Args[0] 14314 if !(isPowerOfTwo(c-4) && c >= 68) { 14315 break 14316 } 14317 v.reset(OpAMD64LEAQ4) 14318 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14319 v0.AuxInt = log2(c - 4) 14320 v0.AddArg(x) 14321 v.AddArg(v0) 14322 v.AddArg(x) 14323 return true 14324 } 14325 return false 14326 } 14327 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { 14328 b := v.Block 14329 _ = b 14330 // match: (MULQconst [c] x) 14331 // cond: isPowerOfTwo(c-8) && c >= 136 14332 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 14333 for { 14334 c := v.AuxInt 14335 x := v.Args[0] 14336 if !(isPowerOfTwo(c-8) && c >= 136) { 14337 break 14338 } 14339 v.reset(OpAMD64LEAQ8) 14340 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14341 v0.AuxInt = log2(c - 8) 14342 v0.AddArg(x) 14343 v.AddArg(v0) 14344 v.AddArg(x) 14345 return true 14346 } 14347 // match: (MULQconst [c] x) 14348 // cond: c%3 == 0 && isPowerOfTwo(c/3) 14349 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 14350 for { 14351 c := v.AuxInt 14352 x := v.Args[0] 14353 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 14354 break 14355 } 14356 
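// Strength reduction: c*x == (c/3) * (3*x), and 3*x is a single LEAQ2 x x,
// so when c/3 is a power of two the multiply becomes one LEAQ plus one
// shift. Illustratively, c=24 rewrites to (SHLQconst [3] (LEAQ2 x x)),
// i.e. (x + 2*x) << 3 == 24*x.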
v.reset(OpAMD64SHLQconst) 14357 v.AuxInt = log2(c / 3) 14358 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14359 v0.AddArg(x) 14360 v0.AddArg(x) 14361 v.AddArg(v0) 14362 return true 14363 } 14364 // match: (MULQconst [c] x) 14365 // cond: c%5 == 0 && isPowerOfTwo(c/5) 14366 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 14367 for { 14368 c := v.AuxInt 14369 x := v.Args[0] 14370 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 14371 break 14372 } 14373 v.reset(OpAMD64SHLQconst) 14374 v.AuxInt = log2(c / 5) 14375 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14376 v0.AddArg(x) 14377 v0.AddArg(x) 14378 v.AddArg(v0) 14379 return true 14380 } 14381 // match: (MULQconst [c] x) 14382 // cond: c%9 == 0 && isPowerOfTwo(c/9) 14383 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 14384 for { 14385 c := v.AuxInt 14386 x := v.Args[0] 14387 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 14388 break 14389 } 14390 v.reset(OpAMD64SHLQconst) 14391 v.AuxInt = log2(c / 9) 14392 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14393 v0.AddArg(x) 14394 v0.AddArg(x) 14395 v.AddArg(v0) 14396 return true 14397 } 14398 // match: (MULQconst [c] (MOVQconst [d])) 14399 // cond: 14400 // result: (MOVQconst [c*d]) 14401 for { 14402 c := v.AuxInt 14403 v_0 := v.Args[0] 14404 if v_0.Op != OpAMD64MOVQconst { 14405 break 14406 } 14407 d := v_0.AuxInt 14408 v.reset(OpAMD64MOVQconst) 14409 v.AuxInt = c * d 14410 return true 14411 } 14412 return false 14413 } 14414 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 14415 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) 14416 // cond: canMergeLoad(v, l, x) && clobber(l) 14417 // result: (MULSDmem x [off] {sym} ptr mem) 14418 for { 14419 _ = v.Args[1] 14420 x := v.Args[0] 14421 l := v.Args[1] 14422 if l.Op != OpAMD64MOVSDload { 14423 break 14424 } 14425 off := l.AuxInt 14426 sym := l.Aux 14427 _ = l.Args[1] 14428 ptr := l.Args[0] 14429 mem := l.Args[1] 14430 if !(canMergeLoad(v, l, x) && clobber(l)) { 14431 break 14432 } 14433 v.reset(OpAMD64MULSDmem) 14434 v.AuxInt = off 14435 v.Aux = sym 14436 v.AddArg(x) 14437 v.AddArg(ptr) 14438 v.AddArg(mem) 14439 return true 14440 } 14441 // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) 14442 // cond: canMergeLoad(v, l, x) && clobber(l) 14443 // result: (MULSDmem x [off] {sym} ptr mem) 14444 for { 14445 _ = v.Args[1] 14446 l := v.Args[0] 14447 if l.Op != OpAMD64MOVSDload { 14448 break 14449 } 14450 off := l.AuxInt 14451 sym := l.Aux 14452 _ = l.Args[1] 14453 ptr := l.Args[0] 14454 mem := l.Args[1] 14455 x := v.Args[1] 14456 if !(canMergeLoad(v, l, x) && clobber(l)) { 14457 break 14458 } 14459 v.reset(OpAMD64MULSDmem) 14460 v.AuxInt = off 14461 v.Aux = sym 14462 v.AddArg(x) 14463 v.AddArg(ptr) 14464 v.AddArg(mem) 14465 return true 14466 } 14467 return false 14468 } 14469 func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool { 14470 b := v.Block 14471 _ = b 14472 typ := &b.Func.Config.Types 14473 _ = typ 14474 // match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 14475 // cond: 14476 // result: (MULSD x (MOVQi2f y)) 14477 for { 14478 off := v.AuxInt 14479 sym := v.Aux 14480 _ = v.Args[2] 14481 x := v.Args[0] 14482 ptr := v.Args[1] 14483 v_2 := v.Args[2] 14484 if v_2.Op != OpAMD64MOVQstore { 14485 break 14486 } 14487 if v_2.AuxInt != off { 14488 break 14489 } 14490 if v_2.Aux != sym { 14491 break 14492 } 14493 _ = v_2.Args[2] 14494 if ptr != v_2.Args[0] { 14495 break 14496 } 14497 y := v_2.Args[1] 14498 v.reset(OpAMD64MULSD) 14499 v.AddArg(x) 14500 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 14501 
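// v0 reinterprets the just-stored integer bits y as a float64 register
// value (MOVQi2f), forwarding the store directly so the folded load never
// has to read back the memory it was matched against.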
v0.AddArg(y) 14502 v.AddArg(v0) 14503 return true 14504 } 14505 return false 14506 } 14507 func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { 14508 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) 14509 // cond: canMergeLoad(v, l, x) && clobber(l) 14510 // result: (MULSSmem x [off] {sym} ptr mem) 14511 for { 14512 _ = v.Args[1] 14513 x := v.Args[0] 14514 l := v.Args[1] 14515 if l.Op != OpAMD64MOVSSload { 14516 break 14517 } 14518 off := l.AuxInt 14519 sym := l.Aux 14520 _ = l.Args[1] 14521 ptr := l.Args[0] 14522 mem := l.Args[1] 14523 if !(canMergeLoad(v, l, x) && clobber(l)) { 14524 break 14525 } 14526 v.reset(OpAMD64MULSSmem) 14527 v.AuxInt = off 14528 v.Aux = sym 14529 v.AddArg(x) 14530 v.AddArg(ptr) 14531 v.AddArg(mem) 14532 return true 14533 } 14534 // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) 14535 // cond: canMergeLoad(v, l, x) && clobber(l) 14536 // result: (MULSSmem x [off] {sym} ptr mem) 14537 for { 14538 _ = v.Args[1] 14539 l := v.Args[0] 14540 if l.Op != OpAMD64MOVSSload { 14541 break 14542 } 14543 off := l.AuxInt 14544 sym := l.Aux 14545 _ = l.Args[1] 14546 ptr := l.Args[0] 14547 mem := l.Args[1] 14548 x := v.Args[1] 14549 if !(canMergeLoad(v, l, x) && clobber(l)) { 14550 break 14551 } 14552 v.reset(OpAMD64MULSSmem) 14553 v.AuxInt = off 14554 v.Aux = sym 14555 v.AddArg(x) 14556 v.AddArg(ptr) 14557 v.AddArg(mem) 14558 return true 14559 } 14560 return false 14561 } 14562 func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool { 14563 b := v.Block 14564 _ = b 14565 typ := &b.Func.Config.Types 14566 _ = typ 14567 // match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 14568 // cond: 14569 // result: (MULSS x (MOVLi2f y)) 14570 for { 14571 off := v.AuxInt 14572 sym := v.Aux 14573 _ = v.Args[2] 14574 x := v.Args[0] 14575 ptr := v.Args[1] 14576 v_2 := v.Args[2] 14577 if v_2.Op != OpAMD64MOVLstore { 14578 break 14579 } 14580 if v_2.AuxInt != off { 14581 break 14582 } 14583 if v_2.Aux != sym { 14584 break 14585 } 14586 _ = v_2.Args[2] 14587 if ptr != v_2.Args[0] { 14588 break 14589 } 14590 y := v_2.Args[1] 14591 v.reset(OpAMD64MULSS) 14592 v.AddArg(x) 14593 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 14594 v0.AddArg(y) 14595 v.AddArg(v0) 14596 return true 14597 } 14598 return false 14599 } 14600 func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { 14601 // match: (NEGL (MOVLconst [c])) 14602 // cond: 14603 // result: (MOVLconst [int64(int32(-c))]) 14604 for { 14605 v_0 := v.Args[0] 14606 if v_0.Op != OpAMD64MOVLconst { 14607 break 14608 } 14609 c := v_0.AuxInt 14610 v.reset(OpAMD64MOVLconst) 14611 v.AuxInt = int64(int32(-c)) 14612 return true 14613 } 14614 return false 14615 } 14616 func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { 14617 // match: (NEGQ (MOVQconst [c])) 14618 // cond: 14619 // result: (MOVQconst [-c]) 14620 for { 14621 v_0 := v.Args[0] 14622 if v_0.Op != OpAMD64MOVQconst { 14623 break 14624 } 14625 c := v_0.AuxInt 14626 v.reset(OpAMD64MOVQconst) 14627 v.AuxInt = -c 14628 return true 14629 } 14630 // match: (NEGQ (ADDQconst [c] (NEGQ x))) 14631 // cond: c != -(1<<31) 14632 // result: (ADDQconst [-c] x) 14633 for { 14634 v_0 := v.Args[0] 14635 if v_0.Op != OpAMD64ADDQconst { 14636 break 14637 } 14638 c := v_0.AuxInt 14639 v_0_0 := v_0.Args[0] 14640 if v_0_0.Op != OpAMD64NEGQ { 14641 break 14642 } 14643 x := v_0_0.Args[0] 14644 if !(c != -(1 << 31)) { 14645 break 14646 } 14647 v.reset(OpAMD64ADDQconst) 14648 v.AuxInt = -c 14649 v.AddArg(x) 14650 return true 14651 } 14652 return false 14653 } 14654 func 
rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool { 14655 // match: (NOTL (MOVLconst [c])) 14656 // cond: 14657 // result: (MOVLconst [^c]) 14658 for { 14659 v_0 := v.Args[0] 14660 if v_0.Op != OpAMD64MOVLconst { 14661 break 14662 } 14663 c := v_0.AuxInt 14664 v.reset(OpAMD64MOVLconst) 14665 v.AuxInt = ^c 14666 return true 14667 } 14668 return false 14669 } 14670 func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool { 14671 // match: (NOTQ (MOVQconst [c])) 14672 // cond: 14673 // result: (MOVQconst [^c]) 14674 for { 14675 v_0 := v.Args[0] 14676 if v_0.Op != OpAMD64MOVQconst { 14677 break 14678 } 14679 c := v_0.AuxInt 14680 v.reset(OpAMD64MOVQconst) 14681 v.AuxInt = ^c 14682 return true 14683 } 14684 return false 14685 } 14686 func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { 14687 // match: (ORL x (MOVLconst [c])) 14688 // cond: 14689 // result: (ORLconst [c] x) 14690 for { 14691 _ = v.Args[1] 14692 x := v.Args[0] 14693 v_1 := v.Args[1] 14694 if v_1.Op != OpAMD64MOVLconst { 14695 break 14696 } 14697 c := v_1.AuxInt 14698 v.reset(OpAMD64ORLconst) 14699 v.AuxInt = c 14700 v.AddArg(x) 14701 return true 14702 } 14703 // match: (ORL (MOVLconst [c]) x) 14704 // cond: 14705 // result: (ORLconst [c] x) 14706 for { 14707 _ = v.Args[1] 14708 v_0 := v.Args[0] 14709 if v_0.Op != OpAMD64MOVLconst { 14710 break 14711 } 14712 c := v_0.AuxInt 14713 x := v.Args[1] 14714 v.reset(OpAMD64ORLconst) 14715 v.AuxInt = c 14716 v.AddArg(x) 14717 return true 14718 } 14719 // match: (ORL (SHLLconst x [c]) (SHRLconst x [d])) 14720 // cond: d==32-c 14721 // result: (ROLLconst x [c]) 14722 for { 14723 _ = v.Args[1] 14724 v_0 := v.Args[0] 14725 if v_0.Op != OpAMD64SHLLconst { 14726 break 14727 } 14728 c := v_0.AuxInt 14729 x := v_0.Args[0] 14730 v_1 := v.Args[1] 14731 if v_1.Op != OpAMD64SHRLconst { 14732 break 14733 } 14734 d := v_1.AuxInt 14735 if x != v_1.Args[0] { 14736 break 14737 } 14738 if !(d == 32-c) { 14739 break 14740 } 14741 v.reset(OpAMD64ROLLconst) 14742 v.AuxInt = c 14743 v.AddArg(x) 14744 return true 14745 } 14746 // match: (ORL (SHRLconst x [d]) (SHLLconst x [c])) 14747 // cond: d==32-c 14748 // result: (ROLLconst x [c]) 14749 for { 14750 _ = v.Args[1] 14751 v_0 := v.Args[0] 14752 if v_0.Op != OpAMD64SHRLconst { 14753 break 14754 } 14755 d := v_0.AuxInt 14756 x := v_0.Args[0] 14757 v_1 := v.Args[1] 14758 if v_1.Op != OpAMD64SHLLconst { 14759 break 14760 } 14761 c := v_1.AuxInt 14762 if x != v_1.Args[0] { 14763 break 14764 } 14765 if !(d == 32-c) { 14766 break 14767 } 14768 v.reset(OpAMD64ROLLconst) 14769 v.AuxInt = c 14770 v.AddArg(x) 14771 return true 14772 } 14773 // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 14774 // cond: d==16-c && c < 16 && t.Size() == 2 14775 // result: (ROLWconst x [c]) 14776 for { 14777 t := v.Type 14778 _ = v.Args[1] 14779 v_0 := v.Args[0] 14780 if v_0.Op != OpAMD64SHLLconst { 14781 break 14782 } 14783 c := v_0.AuxInt 14784 x := v_0.Args[0] 14785 v_1 := v.Args[1] 14786 if v_1.Op != OpAMD64SHRWconst { 14787 break 14788 } 14789 d := v_1.AuxInt 14790 if x != v_1.Args[0] { 14791 break 14792 } 14793 if !(d == 16-c && c < 16 && t.Size() == 2) { 14794 break 14795 } 14796 v.reset(OpAMD64ROLWconst) 14797 v.AuxInt = c 14798 v.AddArg(x) 14799 return true 14800 } 14801 // match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 14802 // cond: d==16-c && c < 16 && t.Size() == 2 14803 // result: (ROLWconst x [c]) 14804 for { 14805 t := v.Type 14806 _ = v.Args[1] 14807 v_0 := v.Args[0] 14808 if v_0.Op != OpAMD64SHRWconst { 14809 break 14810 } 14811 d := v_0.AuxInt 14812 x := v_0.Args[0] 
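// The checks below require the second operand to be (SHLLconst [c] x) on
// the same x with d == 16-c: x>>d | x<<c over a 2-byte value is exactly a
// 16-bit rotate left by c, hence ROLWconst.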
14813 v_1 := v.Args[1] 14814 if v_1.Op != OpAMD64SHLLconst { 14815 break 14816 } 14817 c := v_1.AuxInt 14818 if x != v_1.Args[0] { 14819 break 14820 } 14821 if !(d == 16-c && c < 16 && t.Size() == 2) { 14822 break 14823 } 14824 v.reset(OpAMD64ROLWconst) 14825 v.AuxInt = c 14826 v.AddArg(x) 14827 return true 14828 } 14829 // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 14830 // cond: d==8-c && c < 8 && t.Size() == 1 14831 // result: (ROLBconst x [c]) 14832 for { 14833 t := v.Type 14834 _ = v.Args[1] 14835 v_0 := v.Args[0] 14836 if v_0.Op != OpAMD64SHLLconst { 14837 break 14838 } 14839 c := v_0.AuxInt 14840 x := v_0.Args[0] 14841 v_1 := v.Args[1] 14842 if v_1.Op != OpAMD64SHRBconst { 14843 break 14844 } 14845 d := v_1.AuxInt 14846 if x != v_1.Args[0] { 14847 break 14848 } 14849 if !(d == 8-c && c < 8 && t.Size() == 1) { 14850 break 14851 } 14852 v.reset(OpAMD64ROLBconst) 14853 v.AuxInt = c 14854 v.AddArg(x) 14855 return true 14856 } 14857 // match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 14858 // cond: d==8-c && c < 8 && t.Size() == 1 14859 // result: (ROLBconst x [c]) 14860 for { 14861 t := v.Type 14862 _ = v.Args[1] 14863 v_0 := v.Args[0] 14864 if v_0.Op != OpAMD64SHRBconst { 14865 break 14866 } 14867 d := v_0.AuxInt 14868 x := v_0.Args[0] 14869 v_1 := v.Args[1] 14870 if v_1.Op != OpAMD64SHLLconst { 14871 break 14872 } 14873 c := v_1.AuxInt 14874 if x != v_1.Args[0] { 14875 break 14876 } 14877 if !(d == 8-c && c < 8 && t.Size() == 1) { 14878 break 14879 } 14880 v.reset(OpAMD64ROLBconst) 14881 v.AuxInt = c 14882 v.AddArg(x) 14883 return true 14884 } 14885 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 14886 // cond: 14887 // result: (ROLL x y) 14888 for { 14889 _ = v.Args[1] 14890 v_0 := v.Args[0] 14891 if v_0.Op != OpAMD64SHLL { 14892 break 14893 } 14894 _ = v_0.Args[1] 14895 x := v_0.Args[0] 14896 y := v_0.Args[1] 14897 v_1 := v.Args[1] 14898 if v_1.Op != OpAMD64ANDL { 14899 break 14900 } 14901 _ = v_1.Args[1] 14902 v_1_0 := v_1.Args[0] 14903 if v_1_0.Op != OpAMD64SHRL { 14904 break 14905 } 14906 _ = v_1_0.Args[1] 14907 if x != v_1_0.Args[0] { 14908 break 14909 } 14910 v_1_0_1 := v_1_0.Args[1] 14911 if v_1_0_1.Op != OpAMD64NEGQ { 14912 break 14913 } 14914 if y != v_1_0_1.Args[0] { 14915 break 14916 } 14917 v_1_1 := v_1.Args[1] 14918 if v_1_1.Op != OpAMD64SBBLcarrymask { 14919 break 14920 } 14921 v_1_1_0 := v_1_1.Args[0] 14922 if v_1_1_0.Op != OpAMD64CMPQconst { 14923 break 14924 } 14925 if v_1_1_0.AuxInt != 32 { 14926 break 14927 } 14928 v_1_1_0_0 := v_1_1_0.Args[0] 14929 if v_1_1_0_0.Op != OpAMD64NEGQ { 14930 break 14931 } 14932 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 14933 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 14934 break 14935 } 14936 if v_1_1_0_0_0.AuxInt != -32 { 14937 break 14938 } 14939 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 14940 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 14941 break 14942 } 14943 if v_1_1_0_0_0_0.AuxInt != 31 { 14944 break 14945 } 14946 if y != v_1_1_0_0_0_0.Args[0] { 14947 break 14948 } 14949 v.reset(OpAMD64ROLL) 14950 v.AddArg(x) 14951 v.AddArg(y) 14952 return true 14953 } 14954 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y)))) 14955 // cond: 14956 // result: (ROLL x y) 14957 for { 14958 _ = v.Args[1] 14959 v_0 := v.Args[0] 14960 if v_0.Op != OpAMD64SHLL { 14961 break 14962 } 14963 _ = v_0.Args[1] 14964 x := v_0.Args[0] 14965 y := v_0.Args[1] 14966 v_1 := v.Args[1] 14967 if v_1.Op != OpAMD64ANDL { 
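// The second operand is not the (ANDL (SBBLcarrymask ...) (SHRL ...)) mask
// half of the variable-rotate idiom, so this pattern does not apply.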
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
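			// For this rotate-right form the SHLL count must be
			// (NEGQ y); any other shift count means the tree is
			// not a rotate.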
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
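		// All that remains is to check that the other ORL operand
		// is the plain SHRL of the same x by the same y.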
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
17786 break 17787 } 17788 v_0_1_0 := v_0_1.Args[0] 17789 if v_0_1_0.Op != OpAMD64ADDQconst { 17790 break 17791 } 17792 if v_0_1_0.AuxInt != -8 { 17793 break 17794 } 17795 v_0_1_0_0 := v_0_1_0.Args[0] 17796 if v_0_1_0_0.Op != OpAMD64ANDQconst { 17797 break 17798 } 17799 if v_0_1_0_0.AuxInt != 7 { 17800 break 17801 } 17802 y := v_0_1_0_0.Args[0] 17803 v_1 := v.Args[1] 17804 if v_1.Op != OpAMD64SHRB { 17805 break 17806 } 17807 _ = v_1.Args[1] 17808 if x != v_1.Args[0] { 17809 break 17810 } 17811 v_1_1 := v_1.Args[1] 17812 if v_1_1.Op != OpAMD64ANDQconst { 17813 break 17814 } 17815 if v_1_1.AuxInt != 7 { 17816 break 17817 } 17818 if y != v_1_1.Args[0] { 17819 break 17820 } 17821 if !(v.Type.Size() == 1) { 17822 break 17823 } 17824 v.reset(OpAMD64RORB) 17825 v.AddArg(x) 17826 v.AddArg(y) 17827 return true 17828 } 17829 // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) 17830 // cond: v.Type.Size() == 1 17831 // result: (RORB x y) 17832 for { 17833 _ = v.Args[1] 17834 v_0 := v.Args[0] 17835 if v_0.Op != OpAMD64SHRB { 17836 break 17837 } 17838 _ = v_0.Args[1] 17839 x := v_0.Args[0] 17840 v_0_1 := v_0.Args[1] 17841 if v_0_1.Op != OpAMD64ANDLconst { 17842 break 17843 } 17844 if v_0_1.AuxInt != 7 { 17845 break 17846 } 17847 y := v_0_1.Args[0] 17848 v_1 := v.Args[1] 17849 if v_1.Op != OpAMD64SHLL { 17850 break 17851 } 17852 _ = v_1.Args[1] 17853 if x != v_1.Args[0] { 17854 break 17855 } 17856 v_1_1 := v_1.Args[1] 17857 if v_1_1.Op != OpAMD64NEGL { 17858 break 17859 } 17860 v_1_1_0 := v_1_1.Args[0] 17861 if v_1_1_0.Op != OpAMD64ADDLconst { 17862 break 17863 } 17864 if v_1_1_0.AuxInt != -8 { 17865 break 17866 } 17867 v_1_1_0_0 := v_1_1_0.Args[0] 17868 if v_1_1_0_0.Op != OpAMD64ANDLconst { 17869 break 17870 } 17871 if v_1_1_0_0.AuxInt != 7 { 17872 break 17873 } 17874 if y != v_1_1_0_0.Args[0] { 17875 break 17876 } 17877 if !(v.Type.Size() == 1) { 17878 break 17879 } 17880 v.reset(OpAMD64RORB) 17881 v.AddArg(x) 17882 v.AddArg(y) 17883 return true 17884 } 17885 // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7]))) 17886 // cond: v.Type.Size() == 1 17887 // result: (RORB x y) 17888 for { 17889 _ = v.Args[1] 17890 v_0 := v.Args[0] 17891 if v_0.Op != OpAMD64SHLL { 17892 break 17893 } 17894 _ = v_0.Args[1] 17895 x := v_0.Args[0] 17896 v_0_1 := v_0.Args[1] 17897 if v_0_1.Op != OpAMD64NEGL { 17898 break 17899 } 17900 v_0_1_0 := v_0_1.Args[0] 17901 if v_0_1_0.Op != OpAMD64ADDLconst { 17902 break 17903 } 17904 if v_0_1_0.AuxInt != -8 { 17905 break 17906 } 17907 v_0_1_0_0 := v_0_1_0.Args[0] 17908 if v_0_1_0_0.Op != OpAMD64ANDLconst { 17909 break 17910 } 17911 if v_0_1_0_0.AuxInt != 7 { 17912 break 17913 } 17914 y := v_0_1_0_0.Args[0] 17915 v_1 := v.Args[1] 17916 if v_1.Op != OpAMD64SHRB { 17917 break 17918 } 17919 _ = v_1.Args[1] 17920 if x != v_1.Args[0] { 17921 break 17922 } 17923 v_1_1 := v_1.Args[1] 17924 if v_1_1.Op != OpAMD64ANDLconst { 17925 break 17926 } 17927 if v_1_1.AuxInt != 7 { 17928 break 17929 } 17930 if y != v_1_1.Args[0] { 17931 break 17932 } 17933 if !(v.Type.Size() == 1) { 17934 break 17935 } 17936 v.reset(OpAMD64RORB) 17937 v.AddArg(x) 17938 v.AddArg(y) 17939 return true 17940 } 17941 // match: (ORL x x) 17942 // cond: 17943 // result: x 17944 for { 17945 _ = v.Args[1] 17946 x := v.Args[0] 17947 if x != v.Args[1] { 17948 break 17949 } 17950 v.reset(OpCopy) 17951 v.Type = x.Type 17952 v.AddArg(x) 17953 return true 17954 } 17955 // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] 
{s} p mem))) 17956 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17957 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 17958 for { 17959 _ = v.Args[1] 17960 x0 := v.Args[0] 17961 if x0.Op != OpAMD64MOVBload { 17962 break 17963 } 17964 i0 := x0.AuxInt 17965 s := x0.Aux 17966 _ = x0.Args[1] 17967 p := x0.Args[0] 17968 mem := x0.Args[1] 17969 sh := v.Args[1] 17970 if sh.Op != OpAMD64SHLLconst { 17971 break 17972 } 17973 if sh.AuxInt != 8 { 17974 break 17975 } 17976 x1 := sh.Args[0] 17977 if x1.Op != OpAMD64MOVBload { 17978 break 17979 } 17980 i1 := x1.AuxInt 17981 if x1.Aux != s { 17982 break 17983 } 17984 _ = x1.Args[1] 17985 if p != x1.Args[0] { 17986 break 17987 } 17988 if mem != x1.Args[1] { 17989 break 17990 } 17991 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17992 break 17993 } 17994 b = mergePoint(b, x0, x1) 17995 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 17996 v.reset(OpCopy) 17997 v.AddArg(v0) 17998 v0.AuxInt = i0 17999 v0.Aux = s 18000 v0.AddArg(p) 18001 v0.AddArg(mem) 18002 return true 18003 } 18004 return false 18005 } 18006 func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { 18007 b := v.Block 18008 _ = b 18009 typ := &b.Func.Config.Types 18010 _ = typ 18011 // match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 18012 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18013 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 18014 for { 18015 _ = v.Args[1] 18016 sh := v.Args[0] 18017 if sh.Op != OpAMD64SHLLconst { 18018 break 18019 } 18020 if sh.AuxInt != 8 { 18021 break 18022 } 18023 x1 := sh.Args[0] 18024 if x1.Op != OpAMD64MOVBload { 18025 break 18026 } 18027 i1 := x1.AuxInt 18028 s := x1.Aux 18029 _ = x1.Args[1] 18030 p := x1.Args[0] 18031 mem := x1.Args[1] 18032 x0 := v.Args[1] 18033 if x0.Op != OpAMD64MOVBload { 18034 break 18035 } 18036 i0 := x0.AuxInt 18037 if x0.Aux != s { 18038 break 18039 } 18040 _ = x0.Args[1] 18041 if p != x0.Args[0] { 18042 break 18043 } 18044 if mem != x0.Args[1] { 18045 break 18046 } 18047 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18048 break 18049 } 18050 b = mergePoint(b, x0, x1) 18051 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18052 v.reset(OpCopy) 18053 v.AddArg(v0) 18054 v0.AuxInt = i0 18055 v0.Aux = s 18056 v0.AddArg(p) 18057 v0.AddArg(mem) 18058 return true 18059 } 18060 // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) 18061 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18062 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 18063 for { 18064 _ = v.Args[1] 18065 x0 := v.Args[0] 18066 if x0.Op != OpAMD64MOVWload { 18067 break 18068 } 18069 i0 := x0.AuxInt 18070 s := x0.Aux 18071 _ = x0.Args[1] 18072 p := x0.Args[0] 18073 mem := x0.Args[1] 18074 sh := v.Args[1] 18075 if sh.Op != OpAMD64SHLLconst { 18076 break 18077 } 18078 if sh.AuxInt != 16 { 18079 break 18080 } 18081 x1 := sh.Args[0] 18082 if x1.Op != OpAMD64MOVWload { 18083 break 18084 } 18085 i1 := x1.AuxInt 18086 if x1.Aux != s { 18087 break 18088 } 18089 _ = x1.Args[1] 18090 if 
p != x1.Args[0] { 18091 break 18092 } 18093 if mem != x1.Args[1] { 18094 break 18095 } 18096 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18097 break 18098 } 18099 b = mergePoint(b, x0, x1) 18100 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 18101 v.reset(OpCopy) 18102 v.AddArg(v0) 18103 v0.AuxInt = i0 18104 v0.Aux = s 18105 v0.AddArg(p) 18106 v0.AddArg(mem) 18107 return true 18108 } 18109 // match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 18110 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18111 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 18112 for { 18113 _ = v.Args[1] 18114 sh := v.Args[0] 18115 if sh.Op != OpAMD64SHLLconst { 18116 break 18117 } 18118 if sh.AuxInt != 16 { 18119 break 18120 } 18121 x1 := sh.Args[0] 18122 if x1.Op != OpAMD64MOVWload { 18123 break 18124 } 18125 i1 := x1.AuxInt 18126 s := x1.Aux 18127 _ = x1.Args[1] 18128 p := x1.Args[0] 18129 mem := x1.Args[1] 18130 x0 := v.Args[1] 18131 if x0.Op != OpAMD64MOVWload { 18132 break 18133 } 18134 i0 := x0.AuxInt 18135 if x0.Aux != s { 18136 break 18137 } 18138 _ = x0.Args[1] 18139 if p != x0.Args[0] { 18140 break 18141 } 18142 if mem != x0.Args[1] { 18143 break 18144 } 18145 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18146 break 18147 } 18148 b = mergePoint(b, x0, x1) 18149 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 18150 v.reset(OpCopy) 18151 v.AddArg(v0) 18152 v0.AuxInt = i0 18153 v0.Aux = s 18154 v0.AddArg(p) 18155 v0.AddArg(mem) 18156 return true 18157 } 18158 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 18159 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18160 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18161 for { 18162 _ = v.Args[1] 18163 s1 := v.Args[0] 18164 if s1.Op != OpAMD64SHLLconst { 18165 break 18166 } 18167 j1 := s1.AuxInt 18168 x1 := s1.Args[0] 18169 if x1.Op != OpAMD64MOVBload { 18170 break 18171 } 18172 i1 := x1.AuxInt 18173 s := x1.Aux 18174 _ = x1.Args[1] 18175 p := x1.Args[0] 18176 mem := x1.Args[1] 18177 or := v.Args[1] 18178 if or.Op != OpAMD64ORL { 18179 break 18180 } 18181 _ = or.Args[1] 18182 s0 := or.Args[0] 18183 if s0.Op != OpAMD64SHLLconst { 18184 break 18185 } 18186 j0 := s0.AuxInt 18187 x0 := s0.Args[0] 18188 if x0.Op != OpAMD64MOVBload { 18189 break 18190 } 18191 i0 := x0.AuxInt 18192 if x0.Aux != s { 18193 break 18194 } 18195 _ = x0.Args[1] 18196 if p != x0.Args[0] { 18197 break 18198 } 18199 if mem != x0.Args[1] { 18200 break 18201 } 18202 y := or.Args[1] 18203 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18204 break 18205 } 18206 b = mergePoint(b, x0, x1) 18207 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18208 v.reset(OpCopy) 18209 v.AddArg(v0) 18210 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18211 v1.AuxInt = 
j0 18212 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18213 v2.AuxInt = i0 18214 v2.Aux = s 18215 v2.AddArg(p) 18216 v2.AddArg(mem) 18217 v1.AddArg(v2) 18218 v0.AddArg(v1) 18219 v0.AddArg(y) 18220 return true 18221 } 18222 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))) 18223 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18224 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18225 for { 18226 _ = v.Args[1] 18227 s1 := v.Args[0] 18228 if s1.Op != OpAMD64SHLLconst { 18229 break 18230 } 18231 j1 := s1.AuxInt 18232 x1 := s1.Args[0] 18233 if x1.Op != OpAMD64MOVBload { 18234 break 18235 } 18236 i1 := x1.AuxInt 18237 s := x1.Aux 18238 _ = x1.Args[1] 18239 p := x1.Args[0] 18240 mem := x1.Args[1] 18241 or := v.Args[1] 18242 if or.Op != OpAMD64ORL { 18243 break 18244 } 18245 _ = or.Args[1] 18246 y := or.Args[0] 18247 s0 := or.Args[1] 18248 if s0.Op != OpAMD64SHLLconst { 18249 break 18250 } 18251 j0 := s0.AuxInt 18252 x0 := s0.Args[0] 18253 if x0.Op != OpAMD64MOVBload { 18254 break 18255 } 18256 i0 := x0.AuxInt 18257 if x0.Aux != s { 18258 break 18259 } 18260 _ = x0.Args[1] 18261 if p != x0.Args[0] { 18262 break 18263 } 18264 if mem != x0.Args[1] { 18265 break 18266 } 18267 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18268 break 18269 } 18270 b = mergePoint(b, x0, x1) 18271 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18272 v.reset(OpCopy) 18273 v.AddArg(v0) 18274 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18275 v1.AuxInt = j0 18276 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18277 v2.AuxInt = i0 18278 v2.Aux = s 18279 v2.AddArg(p) 18280 v2.AddArg(mem) 18281 v1.AddArg(v2) 18282 v0.AddArg(v1) 18283 v0.AddArg(y) 18284 return true 18285 } 18286 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 18287 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18288 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18289 for { 18290 _ = v.Args[1] 18291 or := v.Args[0] 18292 if or.Op != OpAMD64ORL { 18293 break 18294 } 18295 _ = or.Args[1] 18296 s0 := or.Args[0] 18297 if s0.Op != OpAMD64SHLLconst { 18298 break 18299 } 18300 j0 := s0.AuxInt 18301 x0 := s0.Args[0] 18302 if x0.Op != OpAMD64MOVBload { 18303 break 18304 } 18305 i0 := x0.AuxInt 18306 s := x0.Aux 18307 _ = x0.Args[1] 18308 p := x0.Args[0] 18309 mem := x0.Args[1] 18310 y := or.Args[1] 18311 s1 := v.Args[1] 18312 if s1.Op != OpAMD64SHLLconst { 18313 break 18314 } 18315 j1 := s1.AuxInt 18316 x1 := s1.Args[0] 18317 if x1.Op != OpAMD64MOVBload { 18318 break 18319 } 18320 i1 := x1.AuxInt 18321 if x1.Aux != s { 18322 break 18323 } 18324 _ = x1.Args[1] 18325 if p != x1.Args[0] { 18326 break 18327 } 18328 if mem != x1.Args[1] { 18329 break 18330 } 18331 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && 
s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18332 break 18333 } 18334 b = mergePoint(b, x0, x1) 18335 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18336 v.reset(OpCopy) 18337 v.AddArg(v0) 18338 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18339 v1.AuxInt = j0 18340 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18341 v2.AuxInt = i0 18342 v2.Aux = s 18343 v2.AddArg(p) 18344 v2.AddArg(mem) 18345 v1.AddArg(v2) 18346 v0.AddArg(v1) 18347 v0.AddArg(y) 18348 return true 18349 } 18350 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 18351 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18352 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18353 for { 18354 _ = v.Args[1] 18355 or := v.Args[0] 18356 if or.Op != OpAMD64ORL { 18357 break 18358 } 18359 _ = or.Args[1] 18360 y := or.Args[0] 18361 s0 := or.Args[1] 18362 if s0.Op != OpAMD64SHLLconst { 18363 break 18364 } 18365 j0 := s0.AuxInt 18366 x0 := s0.Args[0] 18367 if x0.Op != OpAMD64MOVBload { 18368 break 18369 } 18370 i0 := x0.AuxInt 18371 s := x0.Aux 18372 _ = x0.Args[1] 18373 p := x0.Args[0] 18374 mem := x0.Args[1] 18375 s1 := v.Args[1] 18376 if s1.Op != OpAMD64SHLLconst { 18377 break 18378 } 18379 j1 := s1.AuxInt 18380 x1 := s1.Args[0] 18381 if x1.Op != OpAMD64MOVBload { 18382 break 18383 } 18384 i1 := x1.AuxInt 18385 if x1.Aux != s { 18386 break 18387 } 18388 _ = x1.Args[1] 18389 if p != x1.Args[0] { 18390 break 18391 } 18392 if mem != x1.Args[1] { 18393 break 18394 } 18395 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18396 break 18397 } 18398 b = mergePoint(b, x0, x1) 18399 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18400 v.reset(OpCopy) 18401 v.AddArg(v0) 18402 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18403 v1.AuxInt = j0 18404 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18405 v2.AuxInt = i0 18406 v2.Aux = s 18407 v2.AddArg(p) 18408 v2.AddArg(mem) 18409 v1.AddArg(v2) 18410 v0.AddArg(v1) 18411 v0.AddArg(y) 18412 return true 18413 } 18414 // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18415 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18416 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18417 for { 18418 _ = v.Args[1] 18419 x0 := v.Args[0] 18420 if x0.Op != OpAMD64MOVBloadidx1 { 18421 break 18422 } 18423 i0 := x0.AuxInt 18424 s := x0.Aux 18425 _ = x0.Args[2] 18426 p := x0.Args[0] 18427 idx := x0.Args[1] 18428 mem := x0.Args[2] 18429 sh := v.Args[1] 18430 if sh.Op != OpAMD64SHLLconst { 18431 break 18432 } 18433 if sh.AuxInt != 8 { 18434 break 18435 } 18436 x1 := sh.Args[0] 18437 if x1.Op != OpAMD64MOVBloadidx1 { 18438 break 18439 } 18440 i1 := x1.AuxInt 18441 if x1.Aux != s { 18442 break 18443 } 18444 _ = x1.Args[2] 18445 if p != x1.Args[0] { 18446 break 18447 } 18448 if idx != x1.Args[1] { 18449 break 18450 } 18451 if mem 
!= x1.Args[2] { 18452 break 18453 } 18454 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18455 break 18456 } 18457 b = mergePoint(b, x0, x1) 18458 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18459 v.reset(OpCopy) 18460 v.AddArg(v0) 18461 v0.AuxInt = i0 18462 v0.Aux = s 18463 v0.AddArg(p) 18464 v0.AddArg(idx) 18465 v0.AddArg(mem) 18466 return true 18467 } 18468 // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18469 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18470 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18471 for { 18472 _ = v.Args[1] 18473 x0 := v.Args[0] 18474 if x0.Op != OpAMD64MOVBloadidx1 { 18475 break 18476 } 18477 i0 := x0.AuxInt 18478 s := x0.Aux 18479 _ = x0.Args[2] 18480 idx := x0.Args[0] 18481 p := x0.Args[1] 18482 mem := x0.Args[2] 18483 sh := v.Args[1] 18484 if sh.Op != OpAMD64SHLLconst { 18485 break 18486 } 18487 if sh.AuxInt != 8 { 18488 break 18489 } 18490 x1 := sh.Args[0] 18491 if x1.Op != OpAMD64MOVBloadidx1 { 18492 break 18493 } 18494 i1 := x1.AuxInt 18495 if x1.Aux != s { 18496 break 18497 } 18498 _ = x1.Args[2] 18499 if p != x1.Args[0] { 18500 break 18501 } 18502 if idx != x1.Args[1] { 18503 break 18504 } 18505 if mem != x1.Args[2] { 18506 break 18507 } 18508 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18509 break 18510 } 18511 b = mergePoint(b, x0, x1) 18512 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18513 v.reset(OpCopy) 18514 v.AddArg(v0) 18515 v0.AuxInt = i0 18516 v0.Aux = s 18517 v0.AddArg(p) 18518 v0.AddArg(idx) 18519 v0.AddArg(mem) 18520 return true 18521 } 18522 // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18523 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18524 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18525 for { 18526 _ = v.Args[1] 18527 x0 := v.Args[0] 18528 if x0.Op != OpAMD64MOVBloadidx1 { 18529 break 18530 } 18531 i0 := x0.AuxInt 18532 s := x0.Aux 18533 _ = x0.Args[2] 18534 p := x0.Args[0] 18535 idx := x0.Args[1] 18536 mem := x0.Args[2] 18537 sh := v.Args[1] 18538 if sh.Op != OpAMD64SHLLconst { 18539 break 18540 } 18541 if sh.AuxInt != 8 { 18542 break 18543 } 18544 x1 := sh.Args[0] 18545 if x1.Op != OpAMD64MOVBloadidx1 { 18546 break 18547 } 18548 i1 := x1.AuxInt 18549 if x1.Aux != s { 18550 break 18551 } 18552 _ = x1.Args[2] 18553 if idx != x1.Args[0] { 18554 break 18555 } 18556 if p != x1.Args[1] { 18557 break 18558 } 18559 if mem != x1.Args[2] { 18560 break 18561 } 18562 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18563 break 18564 } 18565 b = mergePoint(b, x0, x1) 18566 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18567 v.reset(OpCopy) 18568 v.AddArg(v0) 18569 v0.AuxInt = i0 18570 v0.Aux = s 18571 v0.AddArg(p) 18572 v0.AddArg(idx) 18573 v0.AddArg(mem) 18574 return true 18575 } 18576 return false 18577 } 18578 func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool { 18579 b := v.Block 18580 _ = b 18581 typ := &b.Func.Config.Types 18582 _ = typ 18583 // 
match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18584 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18585 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18586 for { 18587 _ = v.Args[1] 18588 x0 := v.Args[0] 18589 if x0.Op != OpAMD64MOVBloadidx1 { 18590 break 18591 } 18592 i0 := x0.AuxInt 18593 s := x0.Aux 18594 _ = x0.Args[2] 18595 idx := x0.Args[0] 18596 p := x0.Args[1] 18597 mem := x0.Args[2] 18598 sh := v.Args[1] 18599 if sh.Op != OpAMD64SHLLconst { 18600 break 18601 } 18602 if sh.AuxInt != 8 { 18603 break 18604 } 18605 x1 := sh.Args[0] 18606 if x1.Op != OpAMD64MOVBloadidx1 { 18607 break 18608 } 18609 i1 := x1.AuxInt 18610 if x1.Aux != s { 18611 break 18612 } 18613 _ = x1.Args[2] 18614 if idx != x1.Args[0] { 18615 break 18616 } 18617 if p != x1.Args[1] { 18618 break 18619 } 18620 if mem != x1.Args[2] { 18621 break 18622 } 18623 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18624 break 18625 } 18626 b = mergePoint(b, x0, x1) 18627 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18628 v.reset(OpCopy) 18629 v.AddArg(v0) 18630 v0.AuxInt = i0 18631 v0.Aux = s 18632 v0.AddArg(p) 18633 v0.AddArg(idx) 18634 v0.AddArg(mem) 18635 return true 18636 } 18637 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 18638 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18639 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18640 for { 18641 _ = v.Args[1] 18642 sh := v.Args[0] 18643 if sh.Op != OpAMD64SHLLconst { 18644 break 18645 } 18646 if sh.AuxInt != 8 { 18647 break 18648 } 18649 x1 := sh.Args[0] 18650 if x1.Op != OpAMD64MOVBloadidx1 { 18651 break 18652 } 18653 i1 := x1.AuxInt 18654 s := x1.Aux 18655 _ = x1.Args[2] 18656 p := x1.Args[0] 18657 idx := x1.Args[1] 18658 mem := x1.Args[2] 18659 x0 := v.Args[1] 18660 if x0.Op != OpAMD64MOVBloadidx1 { 18661 break 18662 } 18663 i0 := x0.AuxInt 18664 if x0.Aux != s { 18665 break 18666 } 18667 _ = x0.Args[2] 18668 if p != x0.Args[0] { 18669 break 18670 } 18671 if idx != x0.Args[1] { 18672 break 18673 } 18674 if mem != x0.Args[2] { 18675 break 18676 } 18677 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18678 break 18679 } 18680 b = mergePoint(b, x0, x1) 18681 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18682 v.reset(OpCopy) 18683 v.AddArg(v0) 18684 v0.AuxInt = i0 18685 v0.Aux = s 18686 v0.AddArg(p) 18687 v0.AddArg(idx) 18688 v0.AddArg(mem) 18689 return true 18690 } 18691 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 18692 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18693 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18694 for { 18695 _ = v.Args[1] 18696 sh := v.Args[0] 18697 if sh.Op != OpAMD64SHLLconst { 18698 break 18699 } 18700 if sh.AuxInt != 8 { 18701 break 18702 } 18703 x1 := sh.Args[0] 18704 if x1.Op != OpAMD64MOVBloadidx1 { 18705 break 18706 } 18707 i1 := x1.AuxInt 18708 s := x1.Aux 18709 _ = x1.Args[2] 
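		// Descriptive note (not from the generated source): the scale-1 address p+idx is
		// symmetric, so the neighboring matches in this group enumerate every p/idx
		// argument order of the same rule. As a rough source-level sketch (buf and i are
		// hypothetical names, not taken from this file), the pattern being folded is:
		//
		//	w := uint16(buf[i]) | uint16(buf[i+1])<<8 // two adjacent byte loads, OR, shift
		//
		// which, once each byte load matches, is replaced by a single little-endian
		// 16-bit load (MOVWloadidx1).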
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
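		// Descriptive note (not from the generated source): the 16-bit pairs
		// follow the byte scheme one size up. The shifted load must sit at
		// offset i0+2 under a SHLLconst [16], so the OR reassembles a
		// little-endian 32-bit value and is replaced by a single MOVLloadidx1.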
18971 s := x0.Aux 18972 _ = x0.Args[2] 18973 p := x0.Args[0] 18974 idx := x0.Args[1] 18975 mem := x0.Args[2] 18976 sh := v.Args[1] 18977 if sh.Op != OpAMD64SHLLconst { 18978 break 18979 } 18980 if sh.AuxInt != 16 { 18981 break 18982 } 18983 x1 := sh.Args[0] 18984 if x1.Op != OpAMD64MOVWloadidx1 { 18985 break 18986 } 18987 i1 := x1.AuxInt 18988 if x1.Aux != s { 18989 break 18990 } 18991 _ = x1.Args[2] 18992 if idx != x1.Args[0] { 18993 break 18994 } 18995 if p != x1.Args[1] { 18996 break 18997 } 18998 if mem != x1.Args[2] { 18999 break 19000 } 19001 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19002 break 19003 } 19004 b = mergePoint(b, x0, x1) 19005 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19006 v.reset(OpCopy) 19007 v.AddArg(v0) 19008 v0.AuxInt = i0 19009 v0.Aux = s 19010 v0.AddArg(p) 19011 v0.AddArg(idx) 19012 v0.AddArg(mem) 19013 return true 19014 } 19015 // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 19016 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19017 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19018 for { 19019 _ = v.Args[1] 19020 x0 := v.Args[0] 19021 if x0.Op != OpAMD64MOVWloadidx1 { 19022 break 19023 } 19024 i0 := x0.AuxInt 19025 s := x0.Aux 19026 _ = x0.Args[2] 19027 idx := x0.Args[0] 19028 p := x0.Args[1] 19029 mem := x0.Args[2] 19030 sh := v.Args[1] 19031 if sh.Op != OpAMD64SHLLconst { 19032 break 19033 } 19034 if sh.AuxInt != 16 { 19035 break 19036 } 19037 x1 := sh.Args[0] 19038 if x1.Op != OpAMD64MOVWloadidx1 { 19039 break 19040 } 19041 i1 := x1.AuxInt 19042 if x1.Aux != s { 19043 break 19044 } 19045 _ = x1.Args[2] 19046 if idx != x1.Args[0] { 19047 break 19048 } 19049 if p != x1.Args[1] { 19050 break 19051 } 19052 if mem != x1.Args[2] { 19053 break 19054 } 19055 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19056 break 19057 } 19058 b = mergePoint(b, x0, x1) 19059 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19060 v.reset(OpCopy) 19061 v.AddArg(v0) 19062 v0.AuxInt = i0 19063 v0.Aux = s 19064 v0.AddArg(p) 19065 v0.AddArg(idx) 19066 v0.AddArg(mem) 19067 return true 19068 } 19069 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 19070 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19071 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19072 for { 19073 _ = v.Args[1] 19074 sh := v.Args[0] 19075 if sh.Op != OpAMD64SHLLconst { 19076 break 19077 } 19078 if sh.AuxInt != 16 { 19079 break 19080 } 19081 x1 := sh.Args[0] 19082 if x1.Op != OpAMD64MOVWloadidx1 { 19083 break 19084 } 19085 i1 := x1.AuxInt 19086 s := x1.Aux 19087 _ = x1.Args[2] 19088 p := x1.Args[0] 19089 idx := x1.Args[1] 19090 mem := x1.Args[2] 19091 x0 := v.Args[1] 19092 if x0.Op != OpAMD64MOVWloadidx1 { 19093 break 19094 } 19095 i0 := x0.AuxInt 19096 if x0.Aux != s { 19097 break 19098 } 19099 _ = x0.Args[2] 19100 if p != x0.Args[0] { 19101 break 19102 } 19103 if idx != x0.Args[1] { 19104 break 19105 } 19106 if mem != x0.Args[2] { 19107 break 19108 } 19109 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != 
nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19110 break 19111 } 19112 b = mergePoint(b, x0, x1) 19113 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19114 v.reset(OpCopy) 19115 v.AddArg(v0) 19116 v0.AuxInt = i0 19117 v0.Aux = s 19118 v0.AddArg(p) 19119 v0.AddArg(idx) 19120 v0.AddArg(mem) 19121 return true 19122 } 19123 return false 19124 } 19125 func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool { 19126 b := v.Block 19127 _ = b 19128 typ := &b.Func.Config.Types 19129 _ = typ 19130 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 19131 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19132 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19133 for { 19134 _ = v.Args[1] 19135 sh := v.Args[0] 19136 if sh.Op != OpAMD64SHLLconst { 19137 break 19138 } 19139 if sh.AuxInt != 16 { 19140 break 19141 } 19142 x1 := sh.Args[0] 19143 if x1.Op != OpAMD64MOVWloadidx1 { 19144 break 19145 } 19146 i1 := x1.AuxInt 19147 s := x1.Aux 19148 _ = x1.Args[2] 19149 idx := x1.Args[0] 19150 p := x1.Args[1] 19151 mem := x1.Args[2] 19152 x0 := v.Args[1] 19153 if x0.Op != OpAMD64MOVWloadidx1 { 19154 break 19155 } 19156 i0 := x0.AuxInt 19157 if x0.Aux != s { 19158 break 19159 } 19160 _ = x0.Args[2] 19161 if p != x0.Args[0] { 19162 break 19163 } 19164 if idx != x0.Args[1] { 19165 break 19166 } 19167 if mem != x0.Args[2] { 19168 break 19169 } 19170 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19171 break 19172 } 19173 b = mergePoint(b, x0, x1) 19174 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19175 v.reset(OpCopy) 19176 v.AddArg(v0) 19177 v0.AuxInt = i0 19178 v0.Aux = s 19179 v0.AddArg(p) 19180 v0.AddArg(idx) 19181 v0.AddArg(mem) 19182 return true 19183 } 19184 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 19185 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19186 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19187 for { 19188 _ = v.Args[1] 19189 sh := v.Args[0] 19190 if sh.Op != OpAMD64SHLLconst { 19191 break 19192 } 19193 if sh.AuxInt != 16 { 19194 break 19195 } 19196 x1 := sh.Args[0] 19197 if x1.Op != OpAMD64MOVWloadidx1 { 19198 break 19199 } 19200 i1 := x1.AuxInt 19201 s := x1.Aux 19202 _ = x1.Args[2] 19203 p := x1.Args[0] 19204 idx := x1.Args[1] 19205 mem := x1.Args[2] 19206 x0 := v.Args[1] 19207 if x0.Op != OpAMD64MOVWloadidx1 { 19208 break 19209 } 19210 i0 := x0.AuxInt 19211 if x0.Aux != s { 19212 break 19213 } 19214 _ = x0.Args[2] 19215 if idx != x0.Args[0] { 19216 break 19217 } 19218 if p != x0.Args[1] { 19219 break 19220 } 19221 if mem != x0.Args[2] { 19222 break 19223 } 19224 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19225 break 19226 } 19227 b = mergePoint(b, x0, x1) 19228 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19229 v.reset(OpCopy) 19230 v.AddArg(v0) 19231 v0.AuxInt = i0 19232 v0.Aux = s 19233 v0.AddArg(p) 19234 v0.AddArg(idx) 19235 v0.AddArg(mem) 19236 return true 19237 } 19238 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 19239 // cond: i1 == i0+2 && 
x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19240 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19241 for { 19242 _ = v.Args[1] 19243 sh := v.Args[0] 19244 if sh.Op != OpAMD64SHLLconst { 19245 break 19246 } 19247 if sh.AuxInt != 16 { 19248 break 19249 } 19250 x1 := sh.Args[0] 19251 if x1.Op != OpAMD64MOVWloadidx1 { 19252 break 19253 } 19254 i1 := x1.AuxInt 19255 s := x1.Aux 19256 _ = x1.Args[2] 19257 idx := x1.Args[0] 19258 p := x1.Args[1] 19259 mem := x1.Args[2] 19260 x0 := v.Args[1] 19261 if x0.Op != OpAMD64MOVWloadidx1 { 19262 break 19263 } 19264 i0 := x0.AuxInt 19265 if x0.Aux != s { 19266 break 19267 } 19268 _ = x0.Args[2] 19269 if idx != x0.Args[0] { 19270 break 19271 } 19272 if p != x0.Args[1] { 19273 break 19274 } 19275 if mem != x0.Args[2] { 19276 break 19277 } 19278 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19279 break 19280 } 19281 b = mergePoint(b, x0, x1) 19282 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19283 v.reset(OpCopy) 19284 v.AddArg(v0) 19285 v0.AuxInt = i0 19286 v0.Aux = s 19287 v0.AddArg(p) 19288 v0.AddArg(idx) 19289 v0.AddArg(mem) 19290 return true 19291 } 19292 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 19293 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19294 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19295 for { 19296 _ = v.Args[1] 19297 s1 := v.Args[0] 19298 if s1.Op != OpAMD64SHLLconst { 19299 break 19300 } 19301 j1 := s1.AuxInt 19302 x1 := s1.Args[0] 19303 if x1.Op != OpAMD64MOVBloadidx1 { 19304 break 19305 } 19306 i1 := x1.AuxInt 19307 s := x1.Aux 19308 _ = x1.Args[2] 19309 p := x1.Args[0] 19310 idx := x1.Args[1] 19311 mem := x1.Args[2] 19312 or := v.Args[1] 19313 if or.Op != OpAMD64ORL { 19314 break 19315 } 19316 _ = or.Args[1] 19317 s0 := or.Args[0] 19318 if s0.Op != OpAMD64SHLLconst { 19319 break 19320 } 19321 j0 := s0.AuxInt 19322 x0 := s0.Args[0] 19323 if x0.Op != OpAMD64MOVBloadidx1 { 19324 break 19325 } 19326 i0 := x0.AuxInt 19327 if x0.Aux != s { 19328 break 19329 } 19330 _ = x0.Args[2] 19331 if p != x0.Args[0] { 19332 break 19333 } 19334 if idx != x0.Args[1] { 19335 break 19336 } 19337 if mem != x0.Args[2] { 19338 break 19339 } 19340 y := or.Args[1] 19341 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19342 break 19343 } 19344 b = mergePoint(b, x0, x1) 19345 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19346 v.reset(OpCopy) 19347 v.AddArg(v0) 19348 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19349 v1.AuxInt = j0 19350 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19351 v2.AuxInt = i0 19352 v2.Aux = s 19353 v2.AddArg(p) 19354 v2.AddArg(idx) 19355 v2.AddArg(mem) 19356 v1.AddArg(v2) 19357 v0.AddArg(v1) 19358 v0.AddArg(y) 19359 return true 19360 } 19361 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 19362 
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19363 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19364 for { 19365 _ = v.Args[1] 19366 s1 := v.Args[0] 19367 if s1.Op != OpAMD64SHLLconst { 19368 break 19369 } 19370 j1 := s1.AuxInt 19371 x1 := s1.Args[0] 19372 if x1.Op != OpAMD64MOVBloadidx1 { 19373 break 19374 } 19375 i1 := x1.AuxInt 19376 s := x1.Aux 19377 _ = x1.Args[2] 19378 idx := x1.Args[0] 19379 p := x1.Args[1] 19380 mem := x1.Args[2] 19381 or := v.Args[1] 19382 if or.Op != OpAMD64ORL { 19383 break 19384 } 19385 _ = or.Args[1] 19386 s0 := or.Args[0] 19387 if s0.Op != OpAMD64SHLLconst { 19388 break 19389 } 19390 j0 := s0.AuxInt 19391 x0 := s0.Args[0] 19392 if x0.Op != OpAMD64MOVBloadidx1 { 19393 break 19394 } 19395 i0 := x0.AuxInt 19396 if x0.Aux != s { 19397 break 19398 } 19399 _ = x0.Args[2] 19400 if p != x0.Args[0] { 19401 break 19402 } 19403 if idx != x0.Args[1] { 19404 break 19405 } 19406 if mem != x0.Args[2] { 19407 break 19408 } 19409 y := or.Args[1] 19410 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19411 break 19412 } 19413 b = mergePoint(b, x0, x1) 19414 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19415 v.reset(OpCopy) 19416 v.AddArg(v0) 19417 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19418 v1.AuxInt = j0 19419 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19420 v2.AuxInt = i0 19421 v2.Aux = s 19422 v2.AddArg(p) 19423 v2.AddArg(idx) 19424 v2.AddArg(mem) 19425 v1.AddArg(v2) 19426 v0.AddArg(v1) 19427 v0.AddArg(y) 19428 return true 19429 } 19430 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 19431 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19432 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19433 for { 19434 _ = v.Args[1] 19435 s1 := v.Args[0] 19436 if s1.Op != OpAMD64SHLLconst { 19437 break 19438 } 19439 j1 := s1.AuxInt 19440 x1 := s1.Args[0] 19441 if x1.Op != OpAMD64MOVBloadidx1 { 19442 break 19443 } 19444 i1 := x1.AuxInt 19445 s := x1.Aux 19446 _ = x1.Args[2] 19447 p := x1.Args[0] 19448 idx := x1.Args[1] 19449 mem := x1.Args[2] 19450 or := v.Args[1] 19451 if or.Op != OpAMD64ORL { 19452 break 19453 } 19454 _ = or.Args[1] 19455 s0 := or.Args[0] 19456 if s0.Op != OpAMD64SHLLconst { 19457 break 19458 } 19459 j0 := s0.AuxInt 19460 x0 := s0.Args[0] 19461 if x0.Op != OpAMD64MOVBloadidx1 { 19462 break 19463 } 19464 i0 := x0.AuxInt 19465 if x0.Aux != s { 19466 break 19467 } 19468 _ = x0.Args[2] 19469 if idx != x0.Args[0] { 19470 break 19471 } 19472 if p != x0.Args[1] { 19473 break 19474 } 19475 if mem != x0.Args[2] { 19476 break 19477 } 19478 y := or.Args[1] 19479 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && 
clobber(or)) { 19480 break 19481 } 19482 b = mergePoint(b, x0, x1) 19483 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19484 v.reset(OpCopy) 19485 v.AddArg(v0) 19486 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19487 v1.AuxInt = j0 19488 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19489 v2.AuxInt = i0 19490 v2.Aux = s 19491 v2.AddArg(p) 19492 v2.AddArg(idx) 19493 v2.AddArg(mem) 19494 v1.AddArg(v2) 19495 v0.AddArg(v1) 19496 v0.AddArg(y) 19497 return true 19498 } 19499 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 19500 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19501 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19502 for { 19503 _ = v.Args[1] 19504 s1 := v.Args[0] 19505 if s1.Op != OpAMD64SHLLconst { 19506 break 19507 } 19508 j1 := s1.AuxInt 19509 x1 := s1.Args[0] 19510 if x1.Op != OpAMD64MOVBloadidx1 { 19511 break 19512 } 19513 i1 := x1.AuxInt 19514 s := x1.Aux 19515 _ = x1.Args[2] 19516 idx := x1.Args[0] 19517 p := x1.Args[1] 19518 mem := x1.Args[2] 19519 or := v.Args[1] 19520 if or.Op != OpAMD64ORL { 19521 break 19522 } 19523 _ = or.Args[1] 19524 s0 := or.Args[0] 19525 if s0.Op != OpAMD64SHLLconst { 19526 break 19527 } 19528 j0 := s0.AuxInt 19529 x0 := s0.Args[0] 19530 if x0.Op != OpAMD64MOVBloadidx1 { 19531 break 19532 } 19533 i0 := x0.AuxInt 19534 if x0.Aux != s { 19535 break 19536 } 19537 _ = x0.Args[2] 19538 if idx != x0.Args[0] { 19539 break 19540 } 19541 if p != x0.Args[1] { 19542 break 19543 } 19544 if mem != x0.Args[2] { 19545 break 19546 } 19547 y := or.Args[1] 19548 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19549 break 19550 } 19551 b = mergePoint(b, x0, x1) 19552 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19553 v.reset(OpCopy) 19554 v.AddArg(v0) 19555 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19556 v1.AuxInt = j0 19557 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19558 v2.AuxInt = i0 19559 v2.Aux = s 19560 v2.AddArg(p) 19561 v2.AddArg(idx) 19562 v2.AddArg(mem) 19563 v1.AddArg(v2) 19564 v0.AddArg(v1) 19565 v0.AddArg(y) 19566 return true 19567 } 19568 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 19569 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19570 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19571 for { 19572 _ = v.Args[1] 19573 s1 := v.Args[0] 19574 if s1.Op != OpAMD64SHLLconst { 19575 break 19576 } 19577 j1 := s1.AuxInt 19578 x1 := s1.Args[0] 19579 if x1.Op != OpAMD64MOVBloadidx1 { 19580 break 19581 } 19582 i1 := x1.AuxInt 19583 s := x1.Aux 19584 _ = x1.Args[2] 19585 p := x1.Args[0] 19586 idx := x1.Args[1] 19587 mem := x1.Args[2] 19588 or := v.Args[1] 19589 if or.Op != OpAMD64ORL { 19590 break 19591 } 19592 _ = or.Args[1] 19593 y := or.Args[0] 19594 s0 := 
or.Args[1] 19595 if s0.Op != OpAMD64SHLLconst { 19596 break 19597 } 19598 j0 := s0.AuxInt 19599 x0 := s0.Args[0] 19600 if x0.Op != OpAMD64MOVBloadidx1 { 19601 break 19602 } 19603 i0 := x0.AuxInt 19604 if x0.Aux != s { 19605 break 19606 } 19607 _ = x0.Args[2] 19608 if p != x0.Args[0] { 19609 break 19610 } 19611 if idx != x0.Args[1] { 19612 break 19613 } 19614 if mem != x0.Args[2] { 19615 break 19616 } 19617 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19618 break 19619 } 19620 b = mergePoint(b, x0, x1) 19621 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19622 v.reset(OpCopy) 19623 v.AddArg(v0) 19624 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19625 v1.AuxInt = j0 19626 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19627 v2.AuxInt = i0 19628 v2.Aux = s 19629 v2.AddArg(p) 19630 v2.AddArg(idx) 19631 v2.AddArg(mem) 19632 v1.AddArg(v2) 19633 v0.AddArg(v1) 19634 v0.AddArg(y) 19635 return true 19636 } 19637 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 19638 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19639 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19640 for { 19641 _ = v.Args[1] 19642 s1 := v.Args[0] 19643 if s1.Op != OpAMD64SHLLconst { 19644 break 19645 } 19646 j1 := s1.AuxInt 19647 x1 := s1.Args[0] 19648 if x1.Op != OpAMD64MOVBloadidx1 { 19649 break 19650 } 19651 i1 := x1.AuxInt 19652 s := x1.Aux 19653 _ = x1.Args[2] 19654 idx := x1.Args[0] 19655 p := x1.Args[1] 19656 mem := x1.Args[2] 19657 or := v.Args[1] 19658 if or.Op != OpAMD64ORL { 19659 break 19660 } 19661 _ = or.Args[1] 19662 y := or.Args[0] 19663 s0 := or.Args[1] 19664 if s0.Op != OpAMD64SHLLconst { 19665 break 19666 } 19667 j0 := s0.AuxInt 19668 x0 := s0.Args[0] 19669 if x0.Op != OpAMD64MOVBloadidx1 { 19670 break 19671 } 19672 i0 := x0.AuxInt 19673 if x0.Aux != s { 19674 break 19675 } 19676 _ = x0.Args[2] 19677 if p != x0.Args[0] { 19678 break 19679 } 19680 if idx != x0.Args[1] { 19681 break 19682 } 19683 if mem != x0.Args[2] { 19684 break 19685 } 19686 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19687 break 19688 } 19689 b = mergePoint(b, x0, x1) 19690 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19691 v.reset(OpCopy) 19692 v.AddArg(v0) 19693 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19694 v1.AuxInt = j0 19695 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19696 v2.AuxInt = i0 19697 v2.Aux = s 19698 v2.AddArg(p) 19699 v2.AddArg(idx) 19700 v2.AddArg(mem) 19701 v1.AddArg(v2) 19702 v0.AddArg(v1) 19703 v0.AddArg(y) 19704 return true 19705 } 19706 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 19707 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or) 19708 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19709 for { 19710 _ = v.Args[1] 19711 s1 := v.Args[0] 19712 if s1.Op != OpAMD64SHLLconst { 19713 break 19714 } 19715 j1 := s1.AuxInt 19716 x1 := s1.Args[0] 19717 if x1.Op != OpAMD64MOVBloadidx1 { 19718 break 19719 } 19720 i1 := x1.AuxInt 19721 s := x1.Aux 19722 _ = x1.Args[2] 19723 p := x1.Args[0] 19724 idx := x1.Args[1] 19725 mem := x1.Args[2] 19726 or := v.Args[1] 19727 if or.Op != OpAMD64ORL { 19728 break 19729 } 19730 _ = or.Args[1] 19731 y := or.Args[0] 19732 s0 := or.Args[1] 19733 if s0.Op != OpAMD64SHLLconst { 19734 break 19735 } 19736 j0 := s0.AuxInt 19737 x0 := s0.Args[0] 19738 if x0.Op != OpAMD64MOVBloadidx1 { 19739 break 19740 } 19741 i0 := x0.AuxInt 19742 if x0.Aux != s { 19743 break 19744 } 19745 _ = x0.Args[2] 19746 if idx != x0.Args[0] { 19747 break 19748 } 19749 if p != x0.Args[1] { 19750 break 19751 } 19752 if mem != x0.Args[2] { 19753 break 19754 } 19755 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19756 break 19757 } 19758 b = mergePoint(b, x0, x1) 19759 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19760 v.reset(OpCopy) 19761 v.AddArg(v0) 19762 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19763 v1.AuxInt = j0 19764 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19765 v2.AuxInt = i0 19766 v2.Aux = s 19767 v2.AddArg(p) 19768 v2.AddArg(idx) 19769 v2.AddArg(mem) 19770 v1.AddArg(v2) 19771 v0.AddArg(v1) 19772 v0.AddArg(y) 19773 return true 19774 } 19775 return false 19776 } 19777 func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool { 19778 b := v.Block 19779 _ = b 19780 typ := &b.Func.Config.Types 19781 _ = typ 19782 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 19783 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19784 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19785 for { 19786 _ = v.Args[1] 19787 s1 := v.Args[0] 19788 if s1.Op != OpAMD64SHLLconst { 19789 break 19790 } 19791 j1 := s1.AuxInt 19792 x1 := s1.Args[0] 19793 if x1.Op != OpAMD64MOVBloadidx1 { 19794 break 19795 } 19796 i1 := x1.AuxInt 19797 s := x1.Aux 19798 _ = x1.Args[2] 19799 idx := x1.Args[0] 19800 p := x1.Args[1] 19801 mem := x1.Args[2] 19802 or := v.Args[1] 19803 if or.Op != OpAMD64ORL { 19804 break 19805 } 19806 _ = or.Args[1] 19807 y := or.Args[0] 19808 s0 := or.Args[1] 19809 if s0.Op != OpAMD64SHLLconst { 19810 break 19811 } 19812 j0 := s0.AuxInt 19813 x0 := s0.Args[0] 19814 if x0.Op != OpAMD64MOVBloadidx1 { 19815 break 19816 } 19817 i0 := x0.AuxInt 19818 if x0.Aux != s { 19819 break 19820 } 19821 _ = x0.Args[2] 19822 if idx != x0.Args[0] { 19823 break 19824 } 19825 if p != x0.Args[1] { 19826 break 19827 } 19828 if mem != x0.Args[2] { 19829 break 19830 } 19831 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19832 break 19833 } 
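		// Descriptive note (not from the generated source): once the structural
		// and use-count checks pass, the replacement is built at the merge
		// point: v0 is the new ORL, v1 the SHLLconst [j0], and v2 the combined
		// MOVWloadidx1 standing in for the two byte loads; v itself is reset
		// to an OpCopy of v0.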
19834 b = mergePoint(b, x0, x1) 19835 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19836 v.reset(OpCopy) 19837 v.AddArg(v0) 19838 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19839 v1.AuxInt = j0 19840 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19841 v2.AuxInt = i0 19842 v2.Aux = s 19843 v2.AddArg(p) 19844 v2.AddArg(idx) 19845 v2.AddArg(mem) 19846 v1.AddArg(v2) 19847 v0.AddArg(v1) 19848 v0.AddArg(y) 19849 return true 19850 } 19851 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 19852 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19853 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19854 for { 19855 _ = v.Args[1] 19856 or := v.Args[0] 19857 if or.Op != OpAMD64ORL { 19858 break 19859 } 19860 _ = or.Args[1] 19861 s0 := or.Args[0] 19862 if s0.Op != OpAMD64SHLLconst { 19863 break 19864 } 19865 j0 := s0.AuxInt 19866 x0 := s0.Args[0] 19867 if x0.Op != OpAMD64MOVBloadidx1 { 19868 break 19869 } 19870 i0 := x0.AuxInt 19871 s := x0.Aux 19872 _ = x0.Args[2] 19873 p := x0.Args[0] 19874 idx := x0.Args[1] 19875 mem := x0.Args[2] 19876 y := or.Args[1] 19877 s1 := v.Args[1] 19878 if s1.Op != OpAMD64SHLLconst { 19879 break 19880 } 19881 j1 := s1.AuxInt 19882 x1 := s1.Args[0] 19883 if x1.Op != OpAMD64MOVBloadidx1 { 19884 break 19885 } 19886 i1 := x1.AuxInt 19887 if x1.Aux != s { 19888 break 19889 } 19890 _ = x1.Args[2] 19891 if p != x1.Args[0] { 19892 break 19893 } 19894 if idx != x1.Args[1] { 19895 break 19896 } 19897 if mem != x1.Args[2] { 19898 break 19899 } 19900 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19901 break 19902 } 19903 b = mergePoint(b, x0, x1) 19904 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19905 v.reset(OpCopy) 19906 v.AddArg(v0) 19907 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19908 v1.AuxInt = j0 19909 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19910 v2.AuxInt = i0 19911 v2.Aux = s 19912 v2.AddArg(p) 19913 v2.AddArg(idx) 19914 v2.AddArg(mem) 19915 v1.AddArg(v2) 19916 v0.AddArg(v1) 19917 v0.AddArg(y) 19918 return true 19919 } 19920 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 19921 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19922 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19923 for { 19924 _ = v.Args[1] 19925 or := v.Args[0] 19926 if or.Op != OpAMD64ORL { 19927 break 19928 } 19929 _ = or.Args[1] 19930 s0 := or.Args[0] 19931 if s0.Op != OpAMD64SHLLconst { 19932 break 19933 } 19934 j0 := s0.AuxInt 19935 x0 := s0.Args[0] 19936 if x0.Op != OpAMD64MOVBloadidx1 { 19937 break 19938 } 19939 i0 := x0.AuxInt 19940 s := x0.Aux 19941 _ = x0.Args[2] 19942 idx := x0.Args[0] 19943 p := x0.Args[1] 19944 mem := x0.Args[2] 19945 y := or.Args[1] 19946 s1 := v.Args[1] 19947 if s1.Op != 
OpAMD64SHLLconst { 19948 break 19949 } 19950 j1 := s1.AuxInt 19951 x1 := s1.Args[0] 19952 if x1.Op != OpAMD64MOVBloadidx1 { 19953 break 19954 } 19955 i1 := x1.AuxInt 19956 if x1.Aux != s { 19957 break 19958 } 19959 _ = x1.Args[2] 19960 if p != x1.Args[0] { 19961 break 19962 } 19963 if idx != x1.Args[1] { 19964 break 19965 } 19966 if mem != x1.Args[2] { 19967 break 19968 } 19969 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19970 break 19971 } 19972 b = mergePoint(b, x0, x1) 19973 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19974 v.reset(OpCopy) 19975 v.AddArg(v0) 19976 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19977 v1.AuxInt = j0 19978 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19979 v2.AuxInt = i0 19980 v2.Aux = s 19981 v2.AddArg(p) 19982 v2.AddArg(idx) 19983 v2.AddArg(mem) 19984 v1.AddArg(v2) 19985 v0.AddArg(v1) 19986 v0.AddArg(y) 19987 return true 19988 } 19989 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 19990 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19991 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19992 for { 19993 _ = v.Args[1] 19994 or := v.Args[0] 19995 if or.Op != OpAMD64ORL { 19996 break 19997 } 19998 _ = or.Args[1] 19999 y := or.Args[0] 20000 s0 := or.Args[1] 20001 if s0.Op != OpAMD64SHLLconst { 20002 break 20003 } 20004 j0 := s0.AuxInt 20005 x0 := s0.Args[0] 20006 if x0.Op != OpAMD64MOVBloadidx1 { 20007 break 20008 } 20009 i0 := x0.AuxInt 20010 s := x0.Aux 20011 _ = x0.Args[2] 20012 p := x0.Args[0] 20013 idx := x0.Args[1] 20014 mem := x0.Args[2] 20015 s1 := v.Args[1] 20016 if s1.Op != OpAMD64SHLLconst { 20017 break 20018 } 20019 j1 := s1.AuxInt 20020 x1 := s1.Args[0] 20021 if x1.Op != OpAMD64MOVBloadidx1 { 20022 break 20023 } 20024 i1 := x1.AuxInt 20025 if x1.Aux != s { 20026 break 20027 } 20028 _ = x1.Args[2] 20029 if p != x1.Args[0] { 20030 break 20031 } 20032 if idx != x1.Args[1] { 20033 break 20034 } 20035 if mem != x1.Args[2] { 20036 break 20037 } 20038 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20039 break 20040 } 20041 b = mergePoint(b, x0, x1) 20042 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20043 v.reset(OpCopy) 20044 v.AddArg(v0) 20045 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20046 v1.AuxInt = j0 20047 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20048 v2.AuxInt = i0 20049 v2.Aux = s 20050 v2.AddArg(p) 20051 v2.AddArg(idx) 20052 v2.AddArg(mem) 20053 v1.AddArg(v2) 20054 v0.AddArg(v1) 20055 v0.AddArg(y) 20056 return true 20057 } 20058 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 20059 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && 
clobber(or) 20060 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 20061 for { 20062 _ = v.Args[1] 20063 or := v.Args[0] 20064 if or.Op != OpAMD64ORL { 20065 break 20066 } 20067 _ = or.Args[1] 20068 y := or.Args[0] 20069 s0 := or.Args[1] 20070 if s0.Op != OpAMD64SHLLconst { 20071 break 20072 } 20073 j0 := s0.AuxInt 20074 x0 := s0.Args[0] 20075 if x0.Op != OpAMD64MOVBloadidx1 { 20076 break 20077 } 20078 i0 := x0.AuxInt 20079 s := x0.Aux 20080 _ = x0.Args[2] 20081 idx := x0.Args[0] 20082 p := x0.Args[1] 20083 mem := x0.Args[2] 20084 s1 := v.Args[1] 20085 if s1.Op != OpAMD64SHLLconst { 20086 break 20087 } 20088 j1 := s1.AuxInt 20089 x1 := s1.Args[0] 20090 if x1.Op != OpAMD64MOVBloadidx1 { 20091 break 20092 } 20093 i1 := x1.AuxInt 20094 if x1.Aux != s { 20095 break 20096 } 20097 _ = x1.Args[2] 20098 if p != x1.Args[0] { 20099 break 20100 } 20101 if idx != x1.Args[1] { 20102 break 20103 } 20104 if mem != x1.Args[2] { 20105 break 20106 } 20107 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20108 break 20109 } 20110 b = mergePoint(b, x0, x1) 20111 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20112 v.reset(OpCopy) 20113 v.AddArg(v0) 20114 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20115 v1.AuxInt = j0 20116 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20117 v2.AuxInt = i0 20118 v2.Aux = s 20119 v2.AddArg(p) 20120 v2.AddArg(idx) 20121 v2.AddArg(mem) 20122 v1.AddArg(v2) 20123 v0.AddArg(v1) 20124 v0.AddArg(y) 20125 return true 20126 } 20127 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 20128 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20129 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 20130 for { 20131 _ = v.Args[1] 20132 or := v.Args[0] 20133 if or.Op != OpAMD64ORL { 20134 break 20135 } 20136 _ = or.Args[1] 20137 s0 := or.Args[0] 20138 if s0.Op != OpAMD64SHLLconst { 20139 break 20140 } 20141 j0 := s0.AuxInt 20142 x0 := s0.Args[0] 20143 if x0.Op != OpAMD64MOVBloadidx1 { 20144 break 20145 } 20146 i0 := x0.AuxInt 20147 s := x0.Aux 20148 _ = x0.Args[2] 20149 p := x0.Args[0] 20150 idx := x0.Args[1] 20151 mem := x0.Args[2] 20152 y := or.Args[1] 20153 s1 := v.Args[1] 20154 if s1.Op != OpAMD64SHLLconst { 20155 break 20156 } 20157 j1 := s1.AuxInt 20158 x1 := s1.Args[0] 20159 if x1.Op != OpAMD64MOVBloadidx1 { 20160 break 20161 } 20162 i1 := x1.AuxInt 20163 if x1.Aux != s { 20164 break 20165 } 20166 _ = x1.Args[2] 20167 if idx != x1.Args[0] { 20168 break 20169 } 20170 if p != x1.Args[1] { 20171 break 20172 } 20173 if mem != x1.Args[2] { 20174 break 20175 } 20176 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20177 break 20178 } 20179 b = mergePoint(b, x0, x1) 20180 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20181 v.reset(OpCopy) 20182 v.AddArg(v0) 20183 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20184 v1.AuxInt 
= j0 20185 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20186 v2.AuxInt = i0 20187 v2.Aux = s 20188 v2.AddArg(p) 20189 v2.AddArg(idx) 20190 v2.AddArg(mem) 20191 v1.AddArg(v2) 20192 v0.AddArg(v1) 20193 v0.AddArg(y) 20194 return true 20195 } 20196 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 20197 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20198 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 20199 for { 20200 _ = v.Args[1] 20201 or := v.Args[0] 20202 if or.Op != OpAMD64ORL { 20203 break 20204 } 20205 _ = or.Args[1] 20206 s0 := or.Args[0] 20207 if s0.Op != OpAMD64SHLLconst { 20208 break 20209 } 20210 j0 := s0.AuxInt 20211 x0 := s0.Args[0] 20212 if x0.Op != OpAMD64MOVBloadidx1 { 20213 break 20214 } 20215 i0 := x0.AuxInt 20216 s := x0.Aux 20217 _ = x0.Args[2] 20218 idx := x0.Args[0] 20219 p := x0.Args[1] 20220 mem := x0.Args[2] 20221 y := or.Args[1] 20222 s1 := v.Args[1] 20223 if s1.Op != OpAMD64SHLLconst { 20224 break 20225 } 20226 j1 := s1.AuxInt 20227 x1 := s1.Args[0] 20228 if x1.Op != OpAMD64MOVBloadidx1 { 20229 break 20230 } 20231 i1 := x1.AuxInt 20232 if x1.Aux != s { 20233 break 20234 } 20235 _ = x1.Args[2] 20236 if idx != x1.Args[0] { 20237 break 20238 } 20239 if p != x1.Args[1] { 20240 break 20241 } 20242 if mem != x1.Args[2] { 20243 break 20244 } 20245 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20246 break 20247 } 20248 b = mergePoint(b, x0, x1) 20249 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20250 v.reset(OpCopy) 20251 v.AddArg(v0) 20252 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20253 v1.AuxInt = j0 20254 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20255 v2.AuxInt = i0 20256 v2.Aux = s 20257 v2.AddArg(p) 20258 v2.AddArg(idx) 20259 v2.AddArg(mem) 20260 v1.AddArg(v2) 20261 v0.AddArg(v1) 20262 v0.AddArg(y) 20263 return true 20264 } 20265 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 20266 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20267 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 20268 for { 20269 _ = v.Args[1] 20270 or := v.Args[0] 20271 if or.Op != OpAMD64ORL { 20272 break 20273 } 20274 _ = or.Args[1] 20275 y := or.Args[0] 20276 s0 := or.Args[1] 20277 if s0.Op != OpAMD64SHLLconst { 20278 break 20279 } 20280 j0 := s0.AuxInt 20281 x0 := s0.Args[0] 20282 if x0.Op != OpAMD64MOVBloadidx1 { 20283 break 20284 } 20285 i0 := x0.AuxInt 20286 s := x0.Aux 20287 _ = x0.Args[2] 20288 p := x0.Args[0] 20289 idx := x0.Args[1] 20290 mem := x0.Args[2] 20291 s1 := v.Args[1] 20292 if s1.Op != OpAMD64SHLLconst { 20293 break 20294 } 20295 j1 := s1.AuxInt 20296 x1 := s1.Args[0] 20297 if x1.Op != OpAMD64MOVBloadidx1 { 20298 break 20299 } 20300 i1 := x1.AuxInt 20301 if x1.Aux != s { 20302 break 
20303 } 20304 _ = x1.Args[2] 20305 if idx != x1.Args[0] { 20306 break 20307 } 20308 if p != x1.Args[1] { 20309 break 20310 } 20311 if mem != x1.Args[2] { 20312 break 20313 } 20314 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20315 break 20316 } 20317 b = mergePoint(b, x0, x1) 20318 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20319 v.reset(OpCopy) 20320 v.AddArg(v0) 20321 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20322 v1.AuxInt = j0 20323 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20324 v2.AuxInt = i0 20325 v2.Aux = s 20326 v2.AddArg(p) 20327 v2.AddArg(idx) 20328 v2.AddArg(mem) 20329 v1.AddArg(v2) 20330 v0.AddArg(v1) 20331 v0.AddArg(y) 20332 return true 20333 } 20334 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 20335 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20336 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 20337 for { 20338 _ = v.Args[1] 20339 or := v.Args[0] 20340 if or.Op != OpAMD64ORL { 20341 break 20342 } 20343 _ = or.Args[1] 20344 y := or.Args[0] 20345 s0 := or.Args[1] 20346 if s0.Op != OpAMD64SHLLconst { 20347 break 20348 } 20349 j0 := s0.AuxInt 20350 x0 := s0.Args[0] 20351 if x0.Op != OpAMD64MOVBloadidx1 { 20352 break 20353 } 20354 i0 := x0.AuxInt 20355 s := x0.Aux 20356 _ = x0.Args[2] 20357 idx := x0.Args[0] 20358 p := x0.Args[1] 20359 mem := x0.Args[2] 20360 s1 := v.Args[1] 20361 if s1.Op != OpAMD64SHLLconst { 20362 break 20363 } 20364 j1 := s1.AuxInt 20365 x1 := s1.Args[0] 20366 if x1.Op != OpAMD64MOVBloadidx1 { 20367 break 20368 } 20369 i1 := x1.AuxInt 20370 if x1.Aux != s { 20371 break 20372 } 20373 _ = x1.Args[2] 20374 if idx != x1.Args[0] { 20375 break 20376 } 20377 if p != x1.Args[1] { 20378 break 20379 } 20380 if mem != x1.Args[2] { 20381 break 20382 } 20383 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20384 break 20385 } 20386 b = mergePoint(b, x0, x1) 20387 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20388 v.reset(OpCopy) 20389 v.AddArg(v0) 20390 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20391 v1.AuxInt = j0 20392 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20393 v2.AuxInt = i0 20394 v2.Aux = s 20395 v2.AddArg(p) 20396 v2.AddArg(idx) 20397 v2.AddArg(mem) 20398 v1.AddArg(v2) 20399 v0.AddArg(v1) 20400 v0.AddArg(y) 20401 return true 20402 } 20403 // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) 20404 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 20405 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 20406 for { 20407 _ = v.Args[1] 20408 x1 := v.Args[0] 20409 if x1.Op != OpAMD64MOVBload { 20410 break 20411 } 20412 i1 := x1.AuxInt 20413 s := x1.Aux 20414 _ = x1.Args[1] 20415 p := x1.Args[0] 20416 mem := x1.Args[1] 20417 sh := v.Args[1] 
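// The MOVBload/SHLLconst [8] cases here recognize a 16-bit big-endian
// read: the byte at i0+1 OR'd with the byte at i0 shifted left by 8
// becomes a single MOVWload followed by ROLW $8, which swaps the two
// bytes of a 16-bit value. A sketch of triggering code (hypothetical
// caller; essentially what encoding/binary's BigEndian.Uint16 expands
// to):
//
//	func be16(b []byte) uint16 {
//		// compiles to MOVWload + ROLW $8 after this rewrite
//		return uint16(b[1]) | uint16(b[0])<<8
//	}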
20418 if sh.Op != OpAMD64SHLLconst { 20419 break 20420 } 20421 if sh.AuxInt != 8 { 20422 break 20423 } 20424 x0 := sh.Args[0] 20425 if x0.Op != OpAMD64MOVBload { 20426 break 20427 } 20428 i0 := x0.AuxInt 20429 if x0.Aux != s { 20430 break 20431 } 20432 _ = x0.Args[1] 20433 if p != x0.Args[0] { 20434 break 20435 } 20436 if mem != x0.Args[1] { 20437 break 20438 } 20439 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 20440 break 20441 } 20442 b = mergePoint(b, x0, x1) 20443 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 20444 v.reset(OpCopy) 20445 v.AddArg(v0) 20446 v0.AuxInt = 8 20447 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 20448 v1.AuxInt = i0 20449 v1.Aux = s 20450 v1.AddArg(p) 20451 v1.AddArg(mem) 20452 v0.AddArg(v1) 20453 return true 20454 } 20455 return false 20456 } 20457 func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { 20458 b := v.Block 20459 _ = b 20460 typ := &b.Func.Config.Types 20461 _ = typ 20462 // match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 20463 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 20464 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 20465 for { 20466 _ = v.Args[1] 20467 sh := v.Args[0] 20468 if sh.Op != OpAMD64SHLLconst { 20469 break 20470 } 20471 if sh.AuxInt != 8 { 20472 break 20473 } 20474 x0 := sh.Args[0] 20475 if x0.Op != OpAMD64MOVBload { 20476 break 20477 } 20478 i0 := x0.AuxInt 20479 s := x0.Aux 20480 _ = x0.Args[1] 20481 p := x0.Args[0] 20482 mem := x0.Args[1] 20483 x1 := v.Args[1] 20484 if x1.Op != OpAMD64MOVBload { 20485 break 20486 } 20487 i1 := x1.AuxInt 20488 if x1.Aux != s { 20489 break 20490 } 20491 _ = x1.Args[1] 20492 if p != x1.Args[0] { 20493 break 20494 } 20495 if mem != x1.Args[1] { 20496 break 20497 } 20498 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 20499 break 20500 } 20501 b = mergePoint(b, x0, x1) 20502 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 20503 v.reset(OpCopy) 20504 v.AddArg(v0) 20505 v0.AuxInt = 8 20506 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 20507 v1.AuxInt = i0 20508 v1.Aux = s 20509 v1.AddArg(p) 20510 v1.AddArg(mem) 20511 v0.AddArg(v1) 20512 return true 20513 } 20514 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 20515 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20516 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 20517 for { 20518 _ = v.Args[1] 20519 r1 := v.Args[0] 20520 if r1.Op != OpAMD64ROLWconst { 20521 break 20522 } 20523 if r1.AuxInt != 8 { 20524 break 20525 } 20526 x1 := r1.Args[0] 20527 if x1.Op != OpAMD64MOVWload { 20528 break 20529 } 20530 i1 := x1.AuxInt 20531 s := x1.Aux 20532 _ = x1.Args[1] 20533 p := x1.Args[0] 20534 mem := x1.Args[1] 20535 sh := v.Args[1] 20536 if sh.Op != OpAMD64SHLLconst { 20537 break 20538 } 20539 if sh.AuxInt != 16 { 20540 break 20541 } 20542 r0 := sh.Args[0] 20543 if r0.Op != OpAMD64ROLWconst { 20544 break 20545 } 20546 if r0.AuxInt != 8 { 20547 break 20548 } 20549 x0 := r0.Args[0] 20550 if x0.Op != 
OpAMD64MOVWload { 20551 break 20552 } 20553 i0 := x0.AuxInt 20554 if x0.Aux != s { 20555 break 20556 } 20557 _ = x0.Args[1] 20558 if p != x0.Args[0] { 20559 break 20560 } 20561 if mem != x0.Args[1] { 20562 break 20563 } 20564 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20565 break 20566 } 20567 b = mergePoint(b, x0, x1) 20568 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20569 v.reset(OpCopy) 20570 v.AddArg(v0) 20571 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 20572 v1.AuxInt = i0 20573 v1.Aux = s 20574 v1.AddArg(p) 20575 v1.AddArg(mem) 20576 v0.AddArg(v1) 20577 return true 20578 } 20579 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 20580 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20581 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 20582 for { 20583 _ = v.Args[1] 20584 sh := v.Args[0] 20585 if sh.Op != OpAMD64SHLLconst { 20586 break 20587 } 20588 if sh.AuxInt != 16 { 20589 break 20590 } 20591 r0 := sh.Args[0] 20592 if r0.Op != OpAMD64ROLWconst { 20593 break 20594 } 20595 if r0.AuxInt != 8 { 20596 break 20597 } 20598 x0 := r0.Args[0] 20599 if x0.Op != OpAMD64MOVWload { 20600 break 20601 } 20602 i0 := x0.AuxInt 20603 s := x0.Aux 20604 _ = x0.Args[1] 20605 p := x0.Args[0] 20606 mem := x0.Args[1] 20607 r1 := v.Args[1] 20608 if r1.Op != OpAMD64ROLWconst { 20609 break 20610 } 20611 if r1.AuxInt != 8 { 20612 break 20613 } 20614 x1 := r1.Args[0] 20615 if x1.Op != OpAMD64MOVWload { 20616 break 20617 } 20618 i1 := x1.AuxInt 20619 if x1.Aux != s { 20620 break 20621 } 20622 _ = x1.Args[1] 20623 if p != x1.Args[0] { 20624 break 20625 } 20626 if mem != x1.Args[1] { 20627 break 20628 } 20629 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20630 break 20631 } 20632 b = mergePoint(b, x0, x1) 20633 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20634 v.reset(OpCopy) 20635 v.AddArg(v0) 20636 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 20637 v1.AuxInt = i0 20638 v1.Aux = s 20639 v1.AddArg(p) 20640 v1.AddArg(mem) 20641 v0.AddArg(v1) 20642 return true 20643 } 20644 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 20645 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20646 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 20647 for { 20648 _ = v.Args[1] 20649 s0 := v.Args[0] 20650 if s0.Op != OpAMD64SHLLconst { 20651 break 20652 } 20653 j0 := s0.AuxInt 20654 x0 := s0.Args[0] 20655 if x0.Op != OpAMD64MOVBload { 20656 break 20657 } 20658 i0 := x0.AuxInt 20659 s := x0.Aux 20660 _ = x0.Args[1] 20661 p := x0.Args[0] 20662 mem := x0.Args[1] 20663 or := v.Args[1] 20664 if or.Op != OpAMD64ORL { 20665 break 20666 } 20667 _ = or.Args[1] 20668 s1 := or.Args[0] 20669 if s1.Op != 
OpAMD64SHLLconst { 20670 break 20671 } 20672 j1 := s1.AuxInt 20673 x1 := s1.Args[0] 20674 if x1.Op != OpAMD64MOVBload { 20675 break 20676 } 20677 i1 := x1.AuxInt 20678 if x1.Aux != s { 20679 break 20680 } 20681 _ = x1.Args[1] 20682 if p != x1.Args[0] { 20683 break 20684 } 20685 if mem != x1.Args[1] { 20686 break 20687 } 20688 y := or.Args[1] 20689 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20690 break 20691 } 20692 b = mergePoint(b, x0, x1) 20693 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20694 v.reset(OpCopy) 20695 v.AddArg(v0) 20696 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20697 v1.AuxInt = j1 20698 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20699 v2.AuxInt = 8 20700 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 20701 v3.AuxInt = i0 20702 v3.Aux = s 20703 v3.AddArg(p) 20704 v3.AddArg(mem) 20705 v2.AddArg(v3) 20706 v1.AddArg(v2) 20707 v0.AddArg(v1) 20708 v0.AddArg(y) 20709 return true 20710 } 20711 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))) 20712 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20713 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 20714 for { 20715 _ = v.Args[1] 20716 s0 := v.Args[0] 20717 if s0.Op != OpAMD64SHLLconst { 20718 break 20719 } 20720 j0 := s0.AuxInt 20721 x0 := s0.Args[0] 20722 if x0.Op != OpAMD64MOVBload { 20723 break 20724 } 20725 i0 := x0.AuxInt 20726 s := x0.Aux 20727 _ = x0.Args[1] 20728 p := x0.Args[0] 20729 mem := x0.Args[1] 20730 or := v.Args[1] 20731 if or.Op != OpAMD64ORL { 20732 break 20733 } 20734 _ = or.Args[1] 20735 y := or.Args[0] 20736 s1 := or.Args[1] 20737 if s1.Op != OpAMD64SHLLconst { 20738 break 20739 } 20740 j1 := s1.AuxInt 20741 x1 := s1.Args[0] 20742 if x1.Op != OpAMD64MOVBload { 20743 break 20744 } 20745 i1 := x1.AuxInt 20746 if x1.Aux != s { 20747 break 20748 } 20749 _ = x1.Args[1] 20750 if p != x1.Args[0] { 20751 break 20752 } 20753 if mem != x1.Args[1] { 20754 break 20755 } 20756 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20757 break 20758 } 20759 b = mergePoint(b, x0, x1) 20760 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20761 v.reset(OpCopy) 20762 v.AddArg(v0) 20763 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20764 v1.AuxInt = j1 20765 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20766 v2.AuxInt = 8 20767 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 20768 v3.AuxInt = i0 20769 v3.Aux = s 20770 v3.AddArg(p) 20771 v3.AddArg(mem) 20772 v2.AddArg(v3) 20773 v1.AddArg(v2) 20774 v0.AddArg(v1) 20775 v0.AddArg(y) 20776 return true 20777 } 20778 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 20779 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or) 20780 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 20781 for { 20782 _ = v.Args[1] 20783 or := v.Args[0] 20784 if or.Op != OpAMD64ORL { 20785 break 20786 } 20787 _ = or.Args[1] 20788 s1 := or.Args[0] 20789 if s1.Op != OpAMD64SHLLconst { 20790 break 20791 } 20792 j1 := s1.AuxInt 20793 x1 := s1.Args[0] 20794 if x1.Op != OpAMD64MOVBload { 20795 break 20796 } 20797 i1 := x1.AuxInt 20798 s := x1.Aux 20799 _ = x1.Args[1] 20800 p := x1.Args[0] 20801 mem := x1.Args[1] 20802 y := or.Args[1] 20803 s0 := v.Args[1] 20804 if s0.Op != OpAMD64SHLLconst { 20805 break 20806 } 20807 j0 := s0.AuxInt 20808 x0 := s0.Args[0] 20809 if x0.Op != OpAMD64MOVBload { 20810 break 20811 } 20812 i0 := x0.AuxInt 20813 if x0.Aux != s { 20814 break 20815 } 20816 _ = x0.Args[1] 20817 if p != x0.Args[0] { 20818 break 20819 } 20820 if mem != x0.Args[1] { 20821 break 20822 } 20823 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20824 break 20825 } 20826 b = mergePoint(b, x0, x1) 20827 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20828 v.reset(OpCopy) 20829 v.AddArg(v0) 20830 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20831 v1.AuxInt = j1 20832 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20833 v2.AuxInt = 8 20834 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 20835 v3.AuxInt = i0 20836 v3.Aux = s 20837 v3.AddArg(p) 20838 v3.AddArg(mem) 20839 v2.AddArg(v3) 20840 v1.AddArg(v2) 20841 v0.AddArg(v1) 20842 v0.AddArg(y) 20843 return true 20844 } 20845 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 20846 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20847 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 20848 for { 20849 _ = v.Args[1] 20850 or := v.Args[0] 20851 if or.Op != OpAMD64ORL { 20852 break 20853 } 20854 _ = or.Args[1] 20855 y := or.Args[0] 20856 s1 := or.Args[1] 20857 if s1.Op != OpAMD64SHLLconst { 20858 break 20859 } 20860 j1 := s1.AuxInt 20861 x1 := s1.Args[0] 20862 if x1.Op != OpAMD64MOVBload { 20863 break 20864 } 20865 i1 := x1.AuxInt 20866 s := x1.Aux 20867 _ = x1.Args[1] 20868 p := x1.Args[0] 20869 mem := x1.Args[1] 20870 s0 := v.Args[1] 20871 if s0.Op != OpAMD64SHLLconst { 20872 break 20873 } 20874 j0 := s0.AuxInt 20875 x0 := s0.Args[0] 20876 if x0.Op != OpAMD64MOVBload { 20877 break 20878 } 20879 i0 := x0.AuxInt 20880 if x0.Aux != s { 20881 break 20882 } 20883 _ = x0.Args[1] 20884 if p != x0.Args[0] { 20885 break 20886 } 20887 if mem != x0.Args[1] { 20888 break 20889 } 20890 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20891 break 20892 } 20893 b = mergePoint(b, x0, x1) 20894 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20895 v.reset(OpCopy) 20896 v.AddArg(v0) 20897 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20898 v1.AuxInt = j1 20899 v2 := 
b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20900 v2.AuxInt = 8 20901 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 20902 v3.AuxInt = i0 20903 v3.Aux = s 20904 v3.AddArg(p) 20905 v3.AddArg(mem) 20906 v2.AddArg(v3) 20907 v1.AddArg(v2) 20908 v0.AddArg(v1) 20909 v0.AddArg(y) 20910 return true 20911 } 20912 // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 20913 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 20914 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 20915 for { 20916 _ = v.Args[1] 20917 x1 := v.Args[0] 20918 if x1.Op != OpAMD64MOVBloadidx1 { 20919 break 20920 } 20921 i1 := x1.AuxInt 20922 s := x1.Aux 20923 _ = x1.Args[2] 20924 p := x1.Args[0] 20925 idx := x1.Args[1] 20926 mem := x1.Args[2] 20927 sh := v.Args[1] 20928 if sh.Op != OpAMD64SHLLconst { 20929 break 20930 } 20931 if sh.AuxInt != 8 { 20932 break 20933 } 20934 x0 := sh.Args[0] 20935 if x0.Op != OpAMD64MOVBloadidx1 { 20936 break 20937 } 20938 i0 := x0.AuxInt 20939 if x0.Aux != s { 20940 break 20941 } 20942 _ = x0.Args[2] 20943 if p != x0.Args[0] { 20944 break 20945 } 20946 if idx != x0.Args[1] { 20947 break 20948 } 20949 if mem != x0.Args[2] { 20950 break 20951 } 20952 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 20953 break 20954 } 20955 b = mergePoint(b, x0, x1) 20956 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 20957 v.reset(OpCopy) 20958 v.AddArg(v0) 20959 v0.AuxInt = 8 20960 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20961 v1.AuxInt = i0 20962 v1.Aux = s 20963 v1.AddArg(p) 20964 v1.AddArg(idx) 20965 v1.AddArg(mem) 20966 v0.AddArg(v1) 20967 return true 20968 } 20969 // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 20970 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 20971 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 20972 for { 20973 _ = v.Args[1] 20974 x1 := v.Args[0] 20975 if x1.Op != OpAMD64MOVBloadidx1 { 20976 break 20977 } 20978 i1 := x1.AuxInt 20979 s := x1.Aux 20980 _ = x1.Args[2] 20981 idx := x1.Args[0] 20982 p := x1.Args[1] 20983 mem := x1.Args[2] 20984 sh := v.Args[1] 20985 if sh.Op != OpAMD64SHLLconst { 20986 break 20987 } 20988 if sh.AuxInt != 8 { 20989 break 20990 } 20991 x0 := sh.Args[0] 20992 if x0.Op != OpAMD64MOVBloadidx1 { 20993 break 20994 } 20995 i0 := x0.AuxInt 20996 if x0.Aux != s { 20997 break 20998 } 20999 _ = x0.Args[2] 21000 if p != x0.Args[0] { 21001 break 21002 } 21003 if idx != x0.Args[1] { 21004 break 21005 } 21006 if mem != x0.Args[2] { 21007 break 21008 } 21009 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21010 break 21011 } 21012 b = mergePoint(b, x0, x1) 21013 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21014 v.reset(OpCopy) 21015 v.AddArg(v0) 21016 v0.AuxInt = 8 21017 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21018 v1.AuxInt = i0 21019 v1.Aux = s 21020 v1.AddArg(p) 21021 v1.AddArg(idx) 21022 v1.AddArg(mem) 21023 v0.AddArg(v1) 21024 return true 21025 } 21026 // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 
[i0] {s} idx p mem))) 21027 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21028 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 21029 for { 21030 _ = v.Args[1] 21031 x1 := v.Args[0] 21032 if x1.Op != OpAMD64MOVBloadidx1 { 21033 break 21034 } 21035 i1 := x1.AuxInt 21036 s := x1.Aux 21037 _ = x1.Args[2] 21038 p := x1.Args[0] 21039 idx := x1.Args[1] 21040 mem := x1.Args[2] 21041 sh := v.Args[1] 21042 if sh.Op != OpAMD64SHLLconst { 21043 break 21044 } 21045 if sh.AuxInt != 8 { 21046 break 21047 } 21048 x0 := sh.Args[0] 21049 if x0.Op != OpAMD64MOVBloadidx1 { 21050 break 21051 } 21052 i0 := x0.AuxInt 21053 if x0.Aux != s { 21054 break 21055 } 21056 _ = x0.Args[2] 21057 if idx != x0.Args[0] { 21058 break 21059 } 21060 if p != x0.Args[1] { 21061 break 21062 } 21063 if mem != x0.Args[2] { 21064 break 21065 } 21066 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21067 break 21068 } 21069 b = mergePoint(b, x0, x1) 21070 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21071 v.reset(OpCopy) 21072 v.AddArg(v0) 21073 v0.AuxInt = 8 21074 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21075 v1.AuxInt = i0 21076 v1.Aux = s 21077 v1.AddArg(p) 21078 v1.AddArg(idx) 21079 v1.AddArg(mem) 21080 v0.AddArg(v1) 21081 return true 21082 } 21083 return false 21084 } 21085 func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool { 21086 b := v.Block 21087 _ = b 21088 typ := &b.Func.Config.Types 21089 _ = typ 21090 // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 21091 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21092 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 21093 for { 21094 _ = v.Args[1] 21095 x1 := v.Args[0] 21096 if x1.Op != OpAMD64MOVBloadidx1 { 21097 break 21098 } 21099 i1 := x1.AuxInt 21100 s := x1.Aux 21101 _ = x1.Args[2] 21102 idx := x1.Args[0] 21103 p := x1.Args[1] 21104 mem := x1.Args[2] 21105 sh := v.Args[1] 21106 if sh.Op != OpAMD64SHLLconst { 21107 break 21108 } 21109 if sh.AuxInt != 8 { 21110 break 21111 } 21112 x0 := sh.Args[0] 21113 if x0.Op != OpAMD64MOVBloadidx1 { 21114 break 21115 } 21116 i0 := x0.AuxInt 21117 if x0.Aux != s { 21118 break 21119 } 21120 _ = x0.Args[2] 21121 if idx != x0.Args[0] { 21122 break 21123 } 21124 if p != x0.Args[1] { 21125 break 21126 } 21127 if mem != x0.Args[2] { 21128 break 21129 } 21130 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21131 break 21132 } 21133 b = mergePoint(b, x0, x1) 21134 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21135 v.reset(OpCopy) 21136 v.AddArg(v0) 21137 v0.AuxInt = 8 21138 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21139 v1.AuxInt = i0 21140 v1.Aux = s 21141 v1.AddArg(p) 21142 v1.AddArg(idx) 21143 v1.AddArg(mem) 21144 v0.AddArg(v1) 21145 return true 21146 } 21147 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 21148 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21149 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] 
(MOVWloadidx1 [i0] {s} p idx mem)) 21150 for { 21151 _ = v.Args[1] 21152 sh := v.Args[0] 21153 if sh.Op != OpAMD64SHLLconst { 21154 break 21155 } 21156 if sh.AuxInt != 8 { 21157 break 21158 } 21159 x0 := sh.Args[0] 21160 if x0.Op != OpAMD64MOVBloadidx1 { 21161 break 21162 } 21163 i0 := x0.AuxInt 21164 s := x0.Aux 21165 _ = x0.Args[2] 21166 p := x0.Args[0] 21167 idx := x0.Args[1] 21168 mem := x0.Args[2] 21169 x1 := v.Args[1] 21170 if x1.Op != OpAMD64MOVBloadidx1 { 21171 break 21172 } 21173 i1 := x1.AuxInt 21174 if x1.Aux != s { 21175 break 21176 } 21177 _ = x1.Args[2] 21178 if p != x1.Args[0] { 21179 break 21180 } 21181 if idx != x1.Args[1] { 21182 break 21183 } 21184 if mem != x1.Args[2] { 21185 break 21186 } 21187 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21188 break 21189 } 21190 b = mergePoint(b, x0, x1) 21191 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21192 v.reset(OpCopy) 21193 v.AddArg(v0) 21194 v0.AuxInt = 8 21195 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21196 v1.AuxInt = i0 21197 v1.Aux = s 21198 v1.AddArg(p) 21199 v1.AddArg(idx) 21200 v1.AddArg(mem) 21201 v0.AddArg(v1) 21202 return true 21203 } 21204 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 21205 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21206 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 21207 for { 21208 _ = v.Args[1] 21209 sh := v.Args[0] 21210 if sh.Op != OpAMD64SHLLconst { 21211 break 21212 } 21213 if sh.AuxInt != 8 { 21214 break 21215 } 21216 x0 := sh.Args[0] 21217 if x0.Op != OpAMD64MOVBloadidx1 { 21218 break 21219 } 21220 i0 := x0.AuxInt 21221 s := x0.Aux 21222 _ = x0.Args[2] 21223 idx := x0.Args[0] 21224 p := x0.Args[1] 21225 mem := x0.Args[2] 21226 x1 := v.Args[1] 21227 if x1.Op != OpAMD64MOVBloadidx1 { 21228 break 21229 } 21230 i1 := x1.AuxInt 21231 if x1.Aux != s { 21232 break 21233 } 21234 _ = x1.Args[2] 21235 if p != x1.Args[0] { 21236 break 21237 } 21238 if idx != x1.Args[1] { 21239 break 21240 } 21241 if mem != x1.Args[2] { 21242 break 21243 } 21244 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21245 break 21246 } 21247 b = mergePoint(b, x0, x1) 21248 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21249 v.reset(OpCopy) 21250 v.AddArg(v0) 21251 v0.AuxInt = 8 21252 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21253 v1.AuxInt = i0 21254 v1.Aux = s 21255 v1.AddArg(p) 21256 v1.AddArg(idx) 21257 v1.AddArg(mem) 21258 v0.AddArg(v1) 21259 return true 21260 } 21261 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 21262 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21263 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 21264 for { 21265 _ = v.Args[1] 21266 sh := v.Args[0] 21267 if sh.Op != OpAMD64SHLLconst { 21268 break 21269 } 21270 if sh.AuxInt != 8 { 21271 break 21272 } 21273 x0 := sh.Args[0] 21274 if x0.Op != OpAMD64MOVBloadidx1 { 21275 break 21276 } 21277 i0 := x0.AuxInt 21278 s := x0.Aux 21279 _ = x0.Args[2] 21280 p := x0.Args[0] 21281 idx := x0.Args[1] 21282 mem := x0.Args[2] 
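// These MOVBloadidx1 cases repeat the byte-swapped 16-bit merge for
// loads through a computed index; as above, every p/idx ordering needs
// its own matcher. A sketch of the indexed form (hypothetical caller):
//
//	func be16At(b []byte, i int) uint16 {
//		// the two indexed byte loads fuse into MOVWloadidx1 + ROLW $8
//		return uint16(b[i+1]) | uint16(b[i])<<8
//	}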
21283 x1 := v.Args[1] 21284 if x1.Op != OpAMD64MOVBloadidx1 { 21285 break 21286 } 21287 i1 := x1.AuxInt 21288 if x1.Aux != s { 21289 break 21290 } 21291 _ = x1.Args[2] 21292 if idx != x1.Args[0] { 21293 break 21294 } 21295 if p != x1.Args[1] { 21296 break 21297 } 21298 if mem != x1.Args[2] { 21299 break 21300 } 21301 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21302 break 21303 } 21304 b = mergePoint(b, x0, x1) 21305 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21306 v.reset(OpCopy) 21307 v.AddArg(v0) 21308 v0.AuxInt = 8 21309 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21310 v1.AuxInt = i0 21311 v1.Aux = s 21312 v1.AddArg(p) 21313 v1.AddArg(idx) 21314 v1.AddArg(mem) 21315 v0.AddArg(v1) 21316 return true 21317 } 21318 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 21319 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21320 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 21321 for { 21322 _ = v.Args[1] 21323 sh := v.Args[0] 21324 if sh.Op != OpAMD64SHLLconst { 21325 break 21326 } 21327 if sh.AuxInt != 8 { 21328 break 21329 } 21330 x0 := sh.Args[0] 21331 if x0.Op != OpAMD64MOVBloadidx1 { 21332 break 21333 } 21334 i0 := x0.AuxInt 21335 s := x0.Aux 21336 _ = x0.Args[2] 21337 idx := x0.Args[0] 21338 p := x0.Args[1] 21339 mem := x0.Args[2] 21340 x1 := v.Args[1] 21341 if x1.Op != OpAMD64MOVBloadidx1 { 21342 break 21343 } 21344 i1 := x1.AuxInt 21345 if x1.Aux != s { 21346 break 21347 } 21348 _ = x1.Args[2] 21349 if idx != x1.Args[0] { 21350 break 21351 } 21352 if p != x1.Args[1] { 21353 break 21354 } 21355 if mem != x1.Args[2] { 21356 break 21357 } 21358 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21359 break 21360 } 21361 b = mergePoint(b, x0, x1) 21362 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21363 v.reset(OpCopy) 21364 v.AddArg(v0) 21365 v0.AuxInt = 8 21366 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21367 v1.AuxInt = i0 21368 v1.Aux = s 21369 v1.AddArg(p) 21370 v1.AddArg(idx) 21371 v1.AddArg(mem) 21372 v0.AddArg(v1) 21373 return true 21374 } 21375 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 21376 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21377 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21378 for { 21379 _ = v.Args[1] 21380 r1 := v.Args[0] 21381 if r1.Op != OpAMD64ROLWconst { 21382 break 21383 } 21384 if r1.AuxInt != 8 { 21385 break 21386 } 21387 x1 := r1.Args[0] 21388 if x1.Op != OpAMD64MOVWloadidx1 { 21389 break 21390 } 21391 i1 := x1.AuxInt 21392 s := x1.Aux 21393 _ = x1.Args[2] 21394 p := x1.Args[0] 21395 idx := x1.Args[1] 21396 mem := x1.Args[2] 21397 sh := v.Args[1] 21398 if sh.Op != OpAMD64SHLLconst { 21399 break 21400 } 21401 if sh.AuxInt != 16 { 21402 break 21403 } 21404 r0 := sh.Args[0] 21405 if r0.Op != OpAMD64ROLWconst { 21406 break 21407 } 21408 if r0.AuxInt != 8 { 21409 break 21410 } 21411 x0 := r0.Args[0] 21412 if x0.Op != OpAMD64MOVWloadidx1 
{ 21413 break 21414 } 21415 i0 := x0.AuxInt 21416 if x0.Aux != s { 21417 break 21418 } 21419 _ = x0.Args[2] 21420 if p != x0.Args[0] { 21421 break 21422 } 21423 if idx != x0.Args[1] { 21424 break 21425 } 21426 if mem != x0.Args[2] { 21427 break 21428 } 21429 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21430 break 21431 } 21432 b = mergePoint(b, x0, x1) 21433 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21434 v.reset(OpCopy) 21435 v.AddArg(v0) 21436 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21437 v1.AuxInt = i0 21438 v1.Aux = s 21439 v1.AddArg(p) 21440 v1.AddArg(idx) 21441 v1.AddArg(mem) 21442 v0.AddArg(v1) 21443 return true 21444 } 21445 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 21446 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21447 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21448 for { 21449 _ = v.Args[1] 21450 r1 := v.Args[0] 21451 if r1.Op != OpAMD64ROLWconst { 21452 break 21453 } 21454 if r1.AuxInt != 8 { 21455 break 21456 } 21457 x1 := r1.Args[0] 21458 if x1.Op != OpAMD64MOVWloadidx1 { 21459 break 21460 } 21461 i1 := x1.AuxInt 21462 s := x1.Aux 21463 _ = x1.Args[2] 21464 idx := x1.Args[0] 21465 p := x1.Args[1] 21466 mem := x1.Args[2] 21467 sh := v.Args[1] 21468 if sh.Op != OpAMD64SHLLconst { 21469 break 21470 } 21471 if sh.AuxInt != 16 { 21472 break 21473 } 21474 r0 := sh.Args[0] 21475 if r0.Op != OpAMD64ROLWconst { 21476 break 21477 } 21478 if r0.AuxInt != 8 { 21479 break 21480 } 21481 x0 := r0.Args[0] 21482 if x0.Op != OpAMD64MOVWloadidx1 { 21483 break 21484 } 21485 i0 := x0.AuxInt 21486 if x0.Aux != s { 21487 break 21488 } 21489 _ = x0.Args[2] 21490 if p != x0.Args[0] { 21491 break 21492 } 21493 if idx != x0.Args[1] { 21494 break 21495 } 21496 if mem != x0.Args[2] { 21497 break 21498 } 21499 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21500 break 21501 } 21502 b = mergePoint(b, x0, x1) 21503 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21504 v.reset(OpCopy) 21505 v.AddArg(v0) 21506 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21507 v1.AuxInt = i0 21508 v1.Aux = s 21509 v1.AddArg(p) 21510 v1.AddArg(idx) 21511 v1.AddArg(mem) 21512 v0.AddArg(v1) 21513 return true 21514 } 21515 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 21516 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21517 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21518 for { 21519 _ = v.Args[1] 21520 r1 := v.Args[0] 21521 if r1.Op != OpAMD64ROLWconst { 21522 break 21523 } 21524 if r1.AuxInt != 8 { 21525 break 21526 } 21527 x1 := r1.Args[0] 21528 if x1.Op != OpAMD64MOVWloadidx1 { 21529 break 21530 } 21531 i1 := x1.AuxInt 21532 s := x1.Aux 21533 _ = x1.Args[2] 21534 p := 
x1.Args[0] 21535 idx := x1.Args[1] 21536 mem := x1.Args[2] 21537 sh := v.Args[1] 21538 if sh.Op != OpAMD64SHLLconst { 21539 break 21540 } 21541 if sh.AuxInt != 16 { 21542 break 21543 } 21544 r0 := sh.Args[0] 21545 if r0.Op != OpAMD64ROLWconst { 21546 break 21547 } 21548 if r0.AuxInt != 8 { 21549 break 21550 } 21551 x0 := r0.Args[0] 21552 if x0.Op != OpAMD64MOVWloadidx1 { 21553 break 21554 } 21555 i0 := x0.AuxInt 21556 if x0.Aux != s { 21557 break 21558 } 21559 _ = x0.Args[2] 21560 if idx != x0.Args[0] { 21561 break 21562 } 21563 if p != x0.Args[1] { 21564 break 21565 } 21566 if mem != x0.Args[2] { 21567 break 21568 } 21569 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21570 break 21571 } 21572 b = mergePoint(b, x0, x1) 21573 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21574 v.reset(OpCopy) 21575 v.AddArg(v0) 21576 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21577 v1.AuxInt = i0 21578 v1.Aux = s 21579 v1.AddArg(p) 21580 v1.AddArg(idx) 21581 v1.AddArg(mem) 21582 v0.AddArg(v1) 21583 return true 21584 } 21585 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 21586 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21587 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21588 for { 21589 _ = v.Args[1] 21590 r1 := v.Args[0] 21591 if r1.Op != OpAMD64ROLWconst { 21592 break 21593 } 21594 if r1.AuxInt != 8 { 21595 break 21596 } 21597 x1 := r1.Args[0] 21598 if x1.Op != OpAMD64MOVWloadidx1 { 21599 break 21600 } 21601 i1 := x1.AuxInt 21602 s := x1.Aux 21603 _ = x1.Args[2] 21604 idx := x1.Args[0] 21605 p := x1.Args[1] 21606 mem := x1.Args[2] 21607 sh := v.Args[1] 21608 if sh.Op != OpAMD64SHLLconst { 21609 break 21610 } 21611 if sh.AuxInt != 16 { 21612 break 21613 } 21614 r0 := sh.Args[0] 21615 if r0.Op != OpAMD64ROLWconst { 21616 break 21617 } 21618 if r0.AuxInt != 8 { 21619 break 21620 } 21621 x0 := r0.Args[0] 21622 if x0.Op != OpAMD64MOVWloadidx1 { 21623 break 21624 } 21625 i0 := x0.AuxInt 21626 if x0.Aux != s { 21627 break 21628 } 21629 _ = x0.Args[2] 21630 if idx != x0.Args[0] { 21631 break 21632 } 21633 if p != x0.Args[1] { 21634 break 21635 } 21636 if mem != x0.Args[2] { 21637 break 21638 } 21639 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21640 break 21641 } 21642 b = mergePoint(b, x0, x1) 21643 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21644 v.reset(OpCopy) 21645 v.AddArg(v0) 21646 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21647 v1.AuxInt = i0 21648 v1.Aux = s 21649 v1.AddArg(p) 21650 v1.AddArg(idx) 21651 v1.AddArg(mem) 21652 v0.AddArg(v1) 21653 return true 21654 } 21655 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 21656 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21657 // result: 
@mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21658 for { 21659 _ = v.Args[1] 21660 sh := v.Args[0] 21661 if sh.Op != OpAMD64SHLLconst { 21662 break 21663 } 21664 if sh.AuxInt != 16 { 21665 break 21666 } 21667 r0 := sh.Args[0] 21668 if r0.Op != OpAMD64ROLWconst { 21669 break 21670 } 21671 if r0.AuxInt != 8 { 21672 break 21673 } 21674 x0 := r0.Args[0] 21675 if x0.Op != OpAMD64MOVWloadidx1 { 21676 break 21677 } 21678 i0 := x0.AuxInt 21679 s := x0.Aux 21680 _ = x0.Args[2] 21681 p := x0.Args[0] 21682 idx := x0.Args[1] 21683 mem := x0.Args[2] 21684 r1 := v.Args[1] 21685 if r1.Op != OpAMD64ROLWconst { 21686 break 21687 } 21688 if r1.AuxInt != 8 { 21689 break 21690 } 21691 x1 := r1.Args[0] 21692 if x1.Op != OpAMD64MOVWloadidx1 { 21693 break 21694 } 21695 i1 := x1.AuxInt 21696 if x1.Aux != s { 21697 break 21698 } 21699 _ = x1.Args[2] 21700 if p != x1.Args[0] { 21701 break 21702 } 21703 if idx != x1.Args[1] { 21704 break 21705 } 21706 if mem != x1.Args[2] { 21707 break 21708 } 21709 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21710 break 21711 } 21712 b = mergePoint(b, x0, x1) 21713 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21714 v.reset(OpCopy) 21715 v.AddArg(v0) 21716 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21717 v1.AuxInt = i0 21718 v1.Aux = s 21719 v1.AddArg(p) 21720 v1.AddArg(idx) 21721 v1.AddArg(mem) 21722 v0.AddArg(v1) 21723 return true 21724 } 21725 return false 21726 } 21727 func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool { 21728 b := v.Block 21729 _ = b 21730 typ := &b.Func.Config.Types 21731 _ = typ 21732 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 21733 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21734 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21735 for { 21736 _ = v.Args[1] 21737 sh := v.Args[0] 21738 if sh.Op != OpAMD64SHLLconst { 21739 break 21740 } 21741 if sh.AuxInt != 16 { 21742 break 21743 } 21744 r0 := sh.Args[0] 21745 if r0.Op != OpAMD64ROLWconst { 21746 break 21747 } 21748 if r0.AuxInt != 8 { 21749 break 21750 } 21751 x0 := r0.Args[0] 21752 if x0.Op != OpAMD64MOVWloadidx1 { 21753 break 21754 } 21755 i0 := x0.AuxInt 21756 s := x0.Aux 21757 _ = x0.Args[2] 21758 idx := x0.Args[0] 21759 p := x0.Args[1] 21760 mem := x0.Args[2] 21761 r1 := v.Args[1] 21762 if r1.Op != OpAMD64ROLWconst { 21763 break 21764 } 21765 if r1.AuxInt != 8 { 21766 break 21767 } 21768 x1 := r1.Args[0] 21769 if x1.Op != OpAMD64MOVWloadidx1 { 21770 break 21771 } 21772 i1 := x1.AuxInt 21773 if x1.Aux != s { 21774 break 21775 } 21776 _ = x1.Args[2] 21777 if p != x1.Args[0] { 21778 break 21779 } 21780 if idx != x1.Args[1] { 21781 break 21782 } 21783 if mem != x1.Args[2] { 21784 break 21785 } 21786 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21787 break 21788 } 21789 b = mergePoint(b, x0, x1) 21790 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21791 v.reset(OpCopy) 21792 v.AddArg(v0) 21793 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, 
typ.UInt32) 21794 v1.AuxInt = i0 21795 v1.Aux = s 21796 v1.AddArg(p) 21797 v1.AddArg(idx) 21798 v1.AddArg(mem) 21799 v0.AddArg(v1) 21800 return true 21801 } 21802 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 21803 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21804 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21805 for { 21806 _ = v.Args[1] 21807 sh := v.Args[0] 21808 if sh.Op != OpAMD64SHLLconst { 21809 break 21810 } 21811 if sh.AuxInt != 16 { 21812 break 21813 } 21814 r0 := sh.Args[0] 21815 if r0.Op != OpAMD64ROLWconst { 21816 break 21817 } 21818 if r0.AuxInt != 8 { 21819 break 21820 } 21821 x0 := r0.Args[0] 21822 if x0.Op != OpAMD64MOVWloadidx1 { 21823 break 21824 } 21825 i0 := x0.AuxInt 21826 s := x0.Aux 21827 _ = x0.Args[2] 21828 p := x0.Args[0] 21829 idx := x0.Args[1] 21830 mem := x0.Args[2] 21831 r1 := v.Args[1] 21832 if r1.Op != OpAMD64ROLWconst { 21833 break 21834 } 21835 if r1.AuxInt != 8 { 21836 break 21837 } 21838 x1 := r1.Args[0] 21839 if x1.Op != OpAMD64MOVWloadidx1 { 21840 break 21841 } 21842 i1 := x1.AuxInt 21843 if x1.Aux != s { 21844 break 21845 } 21846 _ = x1.Args[2] 21847 if idx != x1.Args[0] { 21848 break 21849 } 21850 if p != x1.Args[1] { 21851 break 21852 } 21853 if mem != x1.Args[2] { 21854 break 21855 } 21856 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21857 break 21858 } 21859 b = mergePoint(b, x0, x1) 21860 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21861 v.reset(OpCopy) 21862 v.AddArg(v0) 21863 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21864 v1.AuxInt = i0 21865 v1.Aux = s 21866 v1.AddArg(p) 21867 v1.AddArg(idx) 21868 v1.AddArg(mem) 21869 v0.AddArg(v1) 21870 return true 21871 } 21872 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 21873 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21874 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21875 for { 21876 _ = v.Args[1] 21877 sh := v.Args[0] 21878 if sh.Op != OpAMD64SHLLconst { 21879 break 21880 } 21881 if sh.AuxInt != 16 { 21882 break 21883 } 21884 r0 := sh.Args[0] 21885 if r0.Op != OpAMD64ROLWconst { 21886 break 21887 } 21888 if r0.AuxInt != 8 { 21889 break 21890 } 21891 x0 := r0.Args[0] 21892 if x0.Op != OpAMD64MOVWloadidx1 { 21893 break 21894 } 21895 i0 := x0.AuxInt 21896 s := x0.Aux 21897 _ = x0.Args[2] 21898 idx := x0.Args[0] 21899 p := x0.Args[1] 21900 mem := x0.Args[2] 21901 r1 := v.Args[1] 21902 if r1.Op != OpAMD64ROLWconst { 21903 break 21904 } 21905 if r1.AuxInt != 8 { 21906 break 21907 } 21908 x1 := r1.Args[0] 21909 if x1.Op != OpAMD64MOVWloadidx1 { 21910 break 21911 } 21912 i1 := x1.AuxInt 21913 if x1.Aux != s { 21914 break 21915 } 21916 _ = x1.Args[2] 21917 if idx != x1.Args[0] { 21918 break 21919 } 21920 if p != x1.Args[1] { 21921 break 21922 } 21923 if mem != x1.Args[2] { 21924 break 21925 } 21926 if !(i1 == i0+2 && x0.Uses == 1 && 
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
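// rewriteValueAMD64_OpAMD64ORL_120 continues the same enumeration; the
// generator splits each op's rules into functions of ten, so _110, _120 and
// _130 together form one logical rule list for ORL.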
func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
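	// The final ORL rules fold a 32-bit load directly into the OR
	// (ORL -> ORLmem) when canMergeLoad reports it is safe to subsume the
	// load into the arithmetic instruction, roughly turning a separate
	// MOVL load plus ORL into a single memory-operand instruction such as
	// "ORL (BX), AX"; there is one case per argument order.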
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
	// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
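// The single ORLmem rule below performs store-to-load forwarding across
// register files: when the memory operand is exactly the value just stored
// by a MOVSSstore at the same [off] {sym} ptr address, the reload is
// replaced by a direct SSE-to-integer move (MOVLf2i) of the stored value.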
func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
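	// The rules below collapse the branch-free expansion of a variable
	// rotate into a single ROLQ. The matched tree is what the generic SSA
	// lowering produces for shift-based rotates: the wrap-around right
	// shift is ANDed with an SBBQcarrymask that is all ones exactly when
	// the masked shift count is in range, so the OR of the two halves
	// equals the rotate for every y. A sketch of source that can lower to
	// this shape (an illustrative assumption, not taken from this file):
	//
	//	func rotl(x uint64, y uint) uint64 {
	//		return x<<(y&63) | x>>((64-y)&63)
	//	}
	//
	// The remaining cases enumerate the commutations of ORQ/ANDQ and the
	// 32-bit (NEGL/CMPLconst) form of the shift-count arithmetic.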
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
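// rewriteValueAMD64_OpAMD64ORQ_10 continues the rotate recognition with the
// remaining commuted ROLQ forms and the mirror-image rotate-right patterns.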
func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
	// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
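	// From here on the matched tree is the right-rotate counterpart:
	// SHRQ provides the primary half and the masked SHLQ the wrap-around
	// half, so the whole expression is rewritten to RORQ instead of ROLQ.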
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
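	// The last variants repeat the RORQ patterns with the shift-count
	// arithmetic done in 32 bits (NEGL/ADDLconst/ANDLconst/CMPLconst), as
	// generated when the rotate count is a 32-bit value.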
v_0.Op != OpAMD64SHRQ { 24170 break 24171 } 24172 _ = v_0.Args[1] 24173 x := v_0.Args[0] 24174 y := v_0.Args[1] 24175 v_1 := v.Args[1] 24176 if v_1.Op != OpAMD64ANDQ { 24177 break 24178 } 24179 _ = v_1.Args[1] 24180 v_1_0 := v_1.Args[0] 24181 if v_1_0.Op != OpAMD64SHLQ { 24182 break 24183 } 24184 _ = v_1_0.Args[1] 24185 if x != v_1_0.Args[0] { 24186 break 24187 } 24188 v_1_0_1 := v_1_0.Args[1] 24189 if v_1_0_1.Op != OpAMD64NEGL { 24190 break 24191 } 24192 if y != v_1_0_1.Args[0] { 24193 break 24194 } 24195 v_1_1 := v_1.Args[1] 24196 if v_1_1.Op != OpAMD64SBBQcarrymask { 24197 break 24198 } 24199 v_1_1_0 := v_1_1.Args[0] 24200 if v_1_1_0.Op != OpAMD64CMPLconst { 24201 break 24202 } 24203 if v_1_1_0.AuxInt != 64 { 24204 break 24205 } 24206 v_1_1_0_0 := v_1_1_0.Args[0] 24207 if v_1_1_0_0.Op != OpAMD64NEGL { 24208 break 24209 } 24210 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24211 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 24212 break 24213 } 24214 if v_1_1_0_0_0.AuxInt != -64 { 24215 break 24216 } 24217 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24218 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 24219 break 24220 } 24221 if v_1_1_0_0_0_0.AuxInt != 63 { 24222 break 24223 } 24224 if y != v_1_1_0_0_0_0.Args[0] { 24225 break 24226 } 24227 v.reset(OpAMD64RORQ) 24228 v.AddArg(x) 24229 v.AddArg(y) 24230 return true 24231 } 24232 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y)))) 24233 // cond: 24234 // result: (RORQ x y) 24235 for { 24236 _ = v.Args[1] 24237 v_0 := v.Args[0] 24238 if v_0.Op != OpAMD64SHRQ { 24239 break 24240 } 24241 _ = v_0.Args[1] 24242 x := v_0.Args[0] 24243 y := v_0.Args[1] 24244 v_1 := v.Args[1] 24245 if v_1.Op != OpAMD64ANDQ { 24246 break 24247 } 24248 _ = v_1.Args[1] 24249 v_1_0 := v_1.Args[0] 24250 if v_1_0.Op != OpAMD64SBBQcarrymask { 24251 break 24252 } 24253 v_1_0_0 := v_1_0.Args[0] 24254 if v_1_0_0.Op != OpAMD64CMPLconst { 24255 break 24256 } 24257 if v_1_0_0.AuxInt != 64 { 24258 break 24259 } 24260 v_1_0_0_0 := v_1_0_0.Args[0] 24261 if v_1_0_0_0.Op != OpAMD64NEGL { 24262 break 24263 } 24264 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 24265 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 24266 break 24267 } 24268 if v_1_0_0_0_0.AuxInt != -64 { 24269 break 24270 } 24271 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 24272 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 24273 break 24274 } 24275 if v_1_0_0_0_0_0.AuxInt != 63 { 24276 break 24277 } 24278 if y != v_1_0_0_0_0_0.Args[0] { 24279 break 24280 } 24281 v_1_1 := v_1.Args[1] 24282 if v_1_1.Op != OpAMD64SHLQ { 24283 break 24284 } 24285 _ = v_1_1.Args[1] 24286 if x != v_1_1.Args[0] { 24287 break 24288 } 24289 v_1_1_1 := v_1_1.Args[1] 24290 if v_1_1_1.Op != OpAMD64NEGL { 24291 break 24292 } 24293 if y != v_1_1_1.Args[0] { 24294 break 24295 } 24296 v.reset(OpAMD64RORQ) 24297 v.AddArg(x) 24298 v.AddArg(y) 24299 return true 24300 } 24301 // match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y)) 24302 // cond: 24303 // result: (RORQ x y) 24304 for { 24305 _ = v.Args[1] 24306 v_0 := v.Args[0] 24307 if v_0.Op != OpAMD64ANDQ { 24308 break 24309 } 24310 _ = v_0.Args[1] 24311 v_0_0 := v_0.Args[0] 24312 if v_0_0.Op != OpAMD64SHLQ { 24313 break 24314 } 24315 _ = v_0_0.Args[1] 24316 x := v_0_0.Args[0] 24317 v_0_0_1 := v_0_0.Args[1] 24318 if v_0_0_1.Op != OpAMD64NEGL { 24319 break 24320 } 24321 y := v_0_0_1.Args[0] 24322 v_0_1 := v_0.Args[1] 24323 if v_0_1.Op != OpAMD64SBBQcarrymask { 24324 break 24325 } 24326 v_0_1_0 := v_0_1.Args[0] 24327 if 
v_0_1_0.Op != OpAMD64CMPLconst { 24328 break 24329 } 24330 if v_0_1_0.AuxInt != 64 { 24331 break 24332 } 24333 v_0_1_0_0 := v_0_1_0.Args[0] 24334 if v_0_1_0_0.Op != OpAMD64NEGL { 24335 break 24336 } 24337 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24338 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 24339 break 24340 } 24341 if v_0_1_0_0_0.AuxInt != -64 { 24342 break 24343 } 24344 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24345 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 24346 break 24347 } 24348 if v_0_1_0_0_0_0.AuxInt != 63 { 24349 break 24350 } 24351 if y != v_0_1_0_0_0_0.Args[0] { 24352 break 24353 } 24354 v_1 := v.Args[1] 24355 if v_1.Op != OpAMD64SHRQ { 24356 break 24357 } 24358 _ = v_1.Args[1] 24359 if x != v_1.Args[0] { 24360 break 24361 } 24362 if y != v_1.Args[1] { 24363 break 24364 } 24365 v.reset(OpAMD64RORQ) 24366 v.AddArg(x) 24367 v.AddArg(y) 24368 return true 24369 } 24370 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y)) 24371 // cond: 24372 // result: (RORQ x y) 24373 for { 24374 _ = v.Args[1] 24375 v_0 := v.Args[0] 24376 if v_0.Op != OpAMD64ANDQ { 24377 break 24378 } 24379 _ = v_0.Args[1] 24380 v_0_0 := v_0.Args[0] 24381 if v_0_0.Op != OpAMD64SBBQcarrymask { 24382 break 24383 } 24384 v_0_0_0 := v_0_0.Args[0] 24385 if v_0_0_0.Op != OpAMD64CMPLconst { 24386 break 24387 } 24388 if v_0_0_0.AuxInt != 64 { 24389 break 24390 } 24391 v_0_0_0_0 := v_0_0_0.Args[0] 24392 if v_0_0_0_0.Op != OpAMD64NEGL { 24393 break 24394 } 24395 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24396 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 24397 break 24398 } 24399 if v_0_0_0_0_0.AuxInt != -64 { 24400 break 24401 } 24402 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24403 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 24404 break 24405 } 24406 if v_0_0_0_0_0_0.AuxInt != 63 { 24407 break 24408 } 24409 y := v_0_0_0_0_0_0.Args[0] 24410 v_0_1 := v_0.Args[1] 24411 if v_0_1.Op != OpAMD64SHLQ { 24412 break 24413 } 24414 _ = v_0_1.Args[1] 24415 x := v_0_1.Args[0] 24416 v_0_1_1 := v_0_1.Args[1] 24417 if v_0_1_1.Op != OpAMD64NEGL { 24418 break 24419 } 24420 if y != v_0_1_1.Args[0] { 24421 break 24422 } 24423 v_1 := v.Args[1] 24424 if v_1.Op != OpAMD64SHRQ { 24425 break 24426 } 24427 _ = v_1.Args[1] 24428 if x != v_1.Args[0] { 24429 break 24430 } 24431 if y != v_1.Args[1] { 24432 break 24433 } 24434 v.reset(OpAMD64RORQ) 24435 v.AddArg(x) 24436 v.AddArg(y) 24437 return true 24438 } 24439 return false 24440 } 24441 func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { 24442 b := v.Block 24443 _ = b 24444 typ := &b.Func.Config.Types 24445 _ = typ 24446 // match: (ORQ x x) 24447 // cond: 24448 // result: x 24449 for { 24450 _ = v.Args[1] 24451 x := v.Args[0] 24452 if x != v.Args[1] { 24453 break 24454 } 24455 v.reset(OpCopy) 24456 v.Type = x.Type 24457 v.AddArg(x) 24458 return true 24459 } 24460 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) 24461 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24462 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 24463 for { 24464 _ = v.Args[1] 24465 x0 := v.Args[0] 24466 if x0.Op != OpAMD64MOVBload { 24467 break 24468 } 24469 i0 := x0.AuxInt 24470 s := x0.Aux 24471 _ = x0.Args[1] 24472 p := x0.Args[0] 24473 mem := x0.Args[1] 24474 sh := v.Args[1] 24475 if sh.Op != OpAMD64SHLQconst { 24476 break 24477 } 24478 if sh.AuxInt != 8 { 24479 break 24480 } 24481 x1 := sh.Args[0] 24482 if x1.Op != OpAMD64MOVBload { 
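// NOTE: every rule body in this file follows the same skeleton: the
// // match: comment gives the pattern as an s-expression, // cond:
// gives extra side conditions, and // result: gives the replacement.
// The for { } wrapper never loops; it exists only so that `break` can
// abandon a failed match and fall through to the next candidate rule,
// while v.reset(...) rewrites v in place when a rule fires.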
24483 break 24484 } 24485 i1 := x1.AuxInt 24486 if x1.Aux != s { 24487 break 24488 } 24489 _ = x1.Args[1] 24490 if p != x1.Args[0] { 24491 break 24492 } 24493 if mem != x1.Args[1] { 24494 break 24495 } 24496 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24497 break 24498 } 24499 b = mergePoint(b, x0, x1) 24500 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 24501 v.reset(OpCopy) 24502 v.AddArg(v0) 24503 v0.AuxInt = i0 24504 v0.Aux = s 24505 v0.AddArg(p) 24506 v0.AddArg(mem) 24507 return true 24508 } 24509 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 24510 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24511 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 24512 for { 24513 _ = v.Args[1] 24514 sh := v.Args[0] 24515 if sh.Op != OpAMD64SHLQconst { 24516 break 24517 } 24518 if sh.AuxInt != 8 { 24519 break 24520 } 24521 x1 := sh.Args[0] 24522 if x1.Op != OpAMD64MOVBload { 24523 break 24524 } 24525 i1 := x1.AuxInt 24526 s := x1.Aux 24527 _ = x1.Args[1] 24528 p := x1.Args[0] 24529 mem := x1.Args[1] 24530 x0 := v.Args[1] 24531 if x0.Op != OpAMD64MOVBload { 24532 break 24533 } 24534 i0 := x0.AuxInt 24535 if x0.Aux != s { 24536 break 24537 } 24538 _ = x0.Args[1] 24539 if p != x0.Args[0] { 24540 break 24541 } 24542 if mem != x0.Args[1] { 24543 break 24544 } 24545 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24546 break 24547 } 24548 b = mergePoint(b, x0, x1) 24549 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 24550 v.reset(OpCopy) 24551 v.AddArg(v0) 24552 v0.AuxInt = i0 24553 v0.Aux = s 24554 v0.AddArg(p) 24555 v0.AddArg(mem) 24556 return true 24557 } 24558 // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) 24559 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24560 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 24561 for { 24562 _ = v.Args[1] 24563 x0 := v.Args[0] 24564 if x0.Op != OpAMD64MOVWload { 24565 break 24566 } 24567 i0 := x0.AuxInt 24568 s := x0.Aux 24569 _ = x0.Args[1] 24570 p := x0.Args[0] 24571 mem := x0.Args[1] 24572 sh := v.Args[1] 24573 if sh.Op != OpAMD64SHLQconst { 24574 break 24575 } 24576 if sh.AuxInt != 16 { 24577 break 24578 } 24579 x1 := sh.Args[0] 24580 if x1.Op != OpAMD64MOVWload { 24581 break 24582 } 24583 i1 := x1.AuxInt 24584 if x1.Aux != s { 24585 break 24586 } 24587 _ = x1.Args[1] 24588 if p != x1.Args[0] { 24589 break 24590 } 24591 if mem != x1.Args[1] { 24592 break 24593 } 24594 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24595 break 24596 } 24597 b = mergePoint(b, x0, x1) 24598 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 24599 v.reset(OpCopy) 24600 v.AddArg(v0) 24601 v0.AuxInt = i0 24602 v0.Aux = s 24603 v0.AddArg(p) 24604 v0.AddArg(mem) 24605 return true 24606 } 24607 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 24608 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24609 // result: @mergePoint(b,x0,x1) (MOVLload 
[i0] {s} p mem) 24610 for { 24611 _ = v.Args[1] 24612 sh := v.Args[0] 24613 if sh.Op != OpAMD64SHLQconst { 24614 break 24615 } 24616 if sh.AuxInt != 16 { 24617 break 24618 } 24619 x1 := sh.Args[0] 24620 if x1.Op != OpAMD64MOVWload { 24621 break 24622 } 24623 i1 := x1.AuxInt 24624 s := x1.Aux 24625 _ = x1.Args[1] 24626 p := x1.Args[0] 24627 mem := x1.Args[1] 24628 x0 := v.Args[1] 24629 if x0.Op != OpAMD64MOVWload { 24630 break 24631 } 24632 i0 := x0.AuxInt 24633 if x0.Aux != s { 24634 break 24635 } 24636 _ = x0.Args[1] 24637 if p != x0.Args[0] { 24638 break 24639 } 24640 if mem != x0.Args[1] { 24641 break 24642 } 24643 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24644 break 24645 } 24646 b = mergePoint(b, x0, x1) 24647 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 24648 v.reset(OpCopy) 24649 v.AddArg(v0) 24650 v0.AuxInt = i0 24651 v0.Aux = s 24652 v0.AddArg(p) 24653 v0.AddArg(mem) 24654 return true 24655 } 24656 // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) 24657 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24658 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 24659 for { 24660 _ = v.Args[1] 24661 x0 := v.Args[0] 24662 if x0.Op != OpAMD64MOVLload { 24663 break 24664 } 24665 i0 := x0.AuxInt 24666 s := x0.Aux 24667 _ = x0.Args[1] 24668 p := x0.Args[0] 24669 mem := x0.Args[1] 24670 sh := v.Args[1] 24671 if sh.Op != OpAMD64SHLQconst { 24672 break 24673 } 24674 if sh.AuxInt != 32 { 24675 break 24676 } 24677 x1 := sh.Args[0] 24678 if x1.Op != OpAMD64MOVLload { 24679 break 24680 } 24681 i1 := x1.AuxInt 24682 if x1.Aux != s { 24683 break 24684 } 24685 _ = x1.Args[1] 24686 if p != x1.Args[0] { 24687 break 24688 } 24689 if mem != x1.Args[1] { 24690 break 24691 } 24692 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24693 break 24694 } 24695 b = mergePoint(b, x0, x1) 24696 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 24697 v.reset(OpCopy) 24698 v.AddArg(v0) 24699 v0.AuxInt = i0 24700 v0.Aux = s 24701 v0.AddArg(p) 24702 v0.AddArg(mem) 24703 return true 24704 } 24705 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem)) 24706 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24707 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 24708 for { 24709 _ = v.Args[1] 24710 sh := v.Args[0] 24711 if sh.Op != OpAMD64SHLQconst { 24712 break 24713 } 24714 if sh.AuxInt != 32 { 24715 break 24716 } 24717 x1 := sh.Args[0] 24718 if x1.Op != OpAMD64MOVLload { 24719 break 24720 } 24721 i1 := x1.AuxInt 24722 s := x1.Aux 24723 _ = x1.Args[1] 24724 p := x1.Args[0] 24725 mem := x1.Args[1] 24726 x0 := v.Args[1] 24727 if x0.Op != OpAMD64MOVLload { 24728 break 24729 } 24730 i0 := x0.AuxInt 24731 if x0.Aux != s { 24732 break 24733 } 24734 _ = x0.Args[1] 24735 if p != x0.Args[0] { 24736 break 24737 } 24738 if mem != x0.Args[1] { 24739 break 24740 } 24741 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24742 break 24743 } 24744 b = mergePoint(b, x0, x1) 24745 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 24746 v.reset(OpCopy) 
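// NOTE: the MOVBload/MOVWload/MOVLload merges in this group combine two
// adjacent little-endian loads joined by ORQ+SHLQconst into one wider
// unaligned load. Roughly, a byte-at-a-time decode such as
//
//	v := uint16(b[0]) | uint16(b[1])<<8
//
// becomes a single MOVWload, and repeated pairwise merging turns the
// eight-byte version of the same idiom into one MOVQload. The cond
// requires Uses == 1 on every partial value and a non-nil
// mergePoint(b, x0, x1), a block that can host the combined load;
// clobber(...) then marks the replaced values dead.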
24747 v.AddArg(v0) 24748 v0.AuxInt = i0 24749 v0.Aux = s 24750 v0.AddArg(p) 24751 v0.AddArg(mem) 24752 return true 24753 } 24754 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 24755 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 24756 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 24757 for { 24758 _ = v.Args[1] 24759 s1 := v.Args[0] 24760 if s1.Op != OpAMD64SHLQconst { 24761 break 24762 } 24763 j1 := s1.AuxInt 24764 x1 := s1.Args[0] 24765 if x1.Op != OpAMD64MOVBload { 24766 break 24767 } 24768 i1 := x1.AuxInt 24769 s := x1.Aux 24770 _ = x1.Args[1] 24771 p := x1.Args[0] 24772 mem := x1.Args[1] 24773 or := v.Args[1] 24774 if or.Op != OpAMD64ORQ { 24775 break 24776 } 24777 _ = or.Args[1] 24778 s0 := or.Args[0] 24779 if s0.Op != OpAMD64SHLQconst { 24780 break 24781 } 24782 j0 := s0.AuxInt 24783 x0 := s0.Args[0] 24784 if x0.Op != OpAMD64MOVBload { 24785 break 24786 } 24787 i0 := x0.AuxInt 24788 if x0.Aux != s { 24789 break 24790 } 24791 _ = x0.Args[1] 24792 if p != x0.Args[0] { 24793 break 24794 } 24795 if mem != x0.Args[1] { 24796 break 24797 } 24798 y := or.Args[1] 24799 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 24800 break 24801 } 24802 b = mergePoint(b, x0, x1) 24803 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 24804 v.reset(OpCopy) 24805 v.AddArg(v0) 24806 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 24807 v1.AuxInt = j0 24808 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 24809 v2.AuxInt = i0 24810 v2.Aux = s 24811 v2.AddArg(p) 24812 v2.AddArg(mem) 24813 v1.AddArg(v2) 24814 v0.AddArg(v1) 24815 v0.AddArg(y) 24816 return true 24817 } 24818 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))) 24819 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 24820 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 24821 for { 24822 _ = v.Args[1] 24823 s1 := v.Args[0] 24824 if s1.Op != OpAMD64SHLQconst { 24825 break 24826 } 24827 j1 := s1.AuxInt 24828 x1 := s1.Args[0] 24829 if x1.Op != OpAMD64MOVBload { 24830 break 24831 } 24832 i1 := x1.AuxInt 24833 s := x1.Aux 24834 _ = x1.Args[1] 24835 p := x1.Args[0] 24836 mem := x1.Args[1] 24837 or := v.Args[1] 24838 if or.Op != OpAMD64ORQ { 24839 break 24840 } 24841 _ = or.Args[1] 24842 y := or.Args[0] 24843 s0 := or.Args[1] 24844 if s0.Op != OpAMD64SHLQconst { 24845 break 24846 } 24847 j0 := s0.AuxInt 24848 x0 := s0.Args[0] 24849 if x0.Op != OpAMD64MOVBload { 24850 break 24851 } 24852 i0 := x0.AuxInt 24853 if x0.Aux != s { 24854 break 24855 } 24856 _ = x0.Args[1] 24857 if p != x0.Args[0] { 24858 break 24859 } 24860 if mem != x0.Args[1] { 24861 break 24862 } 24863 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) 
&& clobber(s0) && clobber(s1) && clobber(or)) { 24864 break 24865 } 24866 b = mergePoint(b, x0, x1) 24867 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 24868 v.reset(OpCopy) 24869 v.AddArg(v0) 24870 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 24871 v1.AuxInt = j0 24872 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 24873 v2.AuxInt = i0 24874 v2.Aux = s 24875 v2.AddArg(p) 24876 v2.AddArg(mem) 24877 v1.AddArg(v2) 24878 v0.AddArg(v1) 24879 v0.AddArg(y) 24880 return true 24881 } 24882 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 24883 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 24884 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 24885 for { 24886 _ = v.Args[1] 24887 or := v.Args[0] 24888 if or.Op != OpAMD64ORQ { 24889 break 24890 } 24891 _ = or.Args[1] 24892 s0 := or.Args[0] 24893 if s0.Op != OpAMD64SHLQconst { 24894 break 24895 } 24896 j0 := s0.AuxInt 24897 x0 := s0.Args[0] 24898 if x0.Op != OpAMD64MOVBload { 24899 break 24900 } 24901 i0 := x0.AuxInt 24902 s := x0.Aux 24903 _ = x0.Args[1] 24904 p := x0.Args[0] 24905 mem := x0.Args[1] 24906 y := or.Args[1] 24907 s1 := v.Args[1] 24908 if s1.Op != OpAMD64SHLQconst { 24909 break 24910 } 24911 j1 := s1.AuxInt 24912 x1 := s1.Args[0] 24913 if x1.Op != OpAMD64MOVBload { 24914 break 24915 } 24916 i1 := x1.AuxInt 24917 if x1.Aux != s { 24918 break 24919 } 24920 _ = x1.Args[1] 24921 if p != x1.Args[0] { 24922 break 24923 } 24924 if mem != x1.Args[1] { 24925 break 24926 } 24927 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 24928 break 24929 } 24930 b = mergePoint(b, x0, x1) 24931 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 24932 v.reset(OpCopy) 24933 v.AddArg(v0) 24934 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 24935 v1.AuxInt = j0 24936 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 24937 v2.AuxInt = i0 24938 v2.Aux = s 24939 v2.AddArg(p) 24940 v2.AddArg(mem) 24941 v1.AddArg(v2) 24942 v0.AddArg(v1) 24943 v0.AddArg(y) 24944 return true 24945 } 24946 return false 24947 } 24948 func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool { 24949 b := v.Block 24950 _ = b 24951 typ := &b.Func.Config.Types 24952 _ = typ 24953 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 24954 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 24955 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 24956 for { 24957 _ = v.Args[1] 24958 or := v.Args[0] 24959 if or.Op != OpAMD64ORQ { 24960 break 24961 } 24962 _ = or.Args[1] 24963 y := or.Args[0] 24964 s0 := or.Args[1] 24965 if s0.Op != OpAMD64SHLQconst { 24966 break 24967 } 24968 j0 := s0.AuxInt 24969 x0 := s0.Args[0] 24970 if x0.Op != OpAMD64MOVBload { 24971 break 24972 } 24973 i0 := x0.AuxInt 24974 s := x0.Aux 24975 _ = x0.Args[1] 24976 p := x0.Args[0] 24977 mem := x0.Args[1] 24978 s1 := 
v.Args[1] 24979 if s1.Op != OpAMD64SHLQconst { 24980 break 24981 } 24982 j1 := s1.AuxInt 24983 x1 := s1.Args[0] 24984 if x1.Op != OpAMD64MOVBload { 24985 break 24986 } 24987 i1 := x1.AuxInt 24988 if x1.Aux != s { 24989 break 24990 } 24991 _ = x1.Args[1] 24992 if p != x1.Args[0] { 24993 break 24994 } 24995 if mem != x1.Args[1] { 24996 break 24997 } 24998 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 24999 break 25000 } 25001 b = mergePoint(b, x0, x1) 25002 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25003 v.reset(OpCopy) 25004 v.AddArg(v0) 25005 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25006 v1.AuxInt = j0 25007 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 25008 v2.AuxInt = i0 25009 v2.Aux = s 25010 v2.AddArg(p) 25011 v2.AddArg(mem) 25012 v1.AddArg(v2) 25013 v0.AddArg(v1) 25014 v0.AddArg(y) 25015 return true 25016 } 25017 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y)) 25018 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25019 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 25020 for { 25021 _ = v.Args[1] 25022 s1 := v.Args[0] 25023 if s1.Op != OpAMD64SHLQconst { 25024 break 25025 } 25026 j1 := s1.AuxInt 25027 x1 := s1.Args[0] 25028 if x1.Op != OpAMD64MOVWload { 25029 break 25030 } 25031 i1 := x1.AuxInt 25032 s := x1.Aux 25033 _ = x1.Args[1] 25034 p := x1.Args[0] 25035 mem := x1.Args[1] 25036 or := v.Args[1] 25037 if or.Op != OpAMD64ORQ { 25038 break 25039 } 25040 _ = or.Args[1] 25041 s0 := or.Args[0] 25042 if s0.Op != OpAMD64SHLQconst { 25043 break 25044 } 25045 j0 := s0.AuxInt 25046 x0 := s0.Args[0] 25047 if x0.Op != OpAMD64MOVWload { 25048 break 25049 } 25050 i0 := x0.AuxInt 25051 if x0.Aux != s { 25052 break 25053 } 25054 _ = x0.Args[1] 25055 if p != x0.Args[0] { 25056 break 25057 } 25058 if mem != x0.Args[1] { 25059 break 25060 } 25061 y := or.Args[1] 25062 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25063 break 25064 } 25065 b = mergePoint(b, x0, x1) 25066 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25067 v.reset(OpCopy) 25068 v.AddArg(v0) 25069 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25070 v1.AuxInt = j0 25071 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 25072 v2.AuxInt = i0 25073 v2.Aux = s 25074 v2.AddArg(p) 25075 v2.AddArg(mem) 25076 v1.AddArg(v2) 25077 v0.AddArg(v1) 25078 v0.AddArg(y) 25079 return true 25080 } 25081 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)))) 25082 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25083 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 25084 for { 25085 _ = v.Args[1] 25086 s1 := v.Args[0] 25087 if s1.Op != 
OpAMD64SHLQconst { 25088 break 25089 } 25090 j1 := s1.AuxInt 25091 x1 := s1.Args[0] 25092 if x1.Op != OpAMD64MOVWload { 25093 break 25094 } 25095 i1 := x1.AuxInt 25096 s := x1.Aux 25097 _ = x1.Args[1] 25098 p := x1.Args[0] 25099 mem := x1.Args[1] 25100 or := v.Args[1] 25101 if or.Op != OpAMD64ORQ { 25102 break 25103 } 25104 _ = or.Args[1] 25105 y := or.Args[0] 25106 s0 := or.Args[1] 25107 if s0.Op != OpAMD64SHLQconst { 25108 break 25109 } 25110 j0 := s0.AuxInt 25111 x0 := s0.Args[0] 25112 if x0.Op != OpAMD64MOVWload { 25113 break 25114 } 25115 i0 := x0.AuxInt 25116 if x0.Aux != s { 25117 break 25118 } 25119 _ = x0.Args[1] 25120 if p != x0.Args[0] { 25121 break 25122 } 25123 if mem != x0.Args[1] { 25124 break 25125 } 25126 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25127 break 25128 } 25129 b = mergePoint(b, x0, x1) 25130 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25131 v.reset(OpCopy) 25132 v.AddArg(v0) 25133 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25134 v1.AuxInt = j0 25135 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 25136 v2.AuxInt = i0 25137 v2.Aux = s 25138 v2.AddArg(p) 25139 v2.AddArg(mem) 25140 v1.AddArg(v2) 25141 v0.AddArg(v1) 25142 v0.AddArg(y) 25143 return true 25144 } 25145 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 25146 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25147 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 25148 for { 25149 _ = v.Args[1] 25150 or := v.Args[0] 25151 if or.Op != OpAMD64ORQ { 25152 break 25153 } 25154 _ = or.Args[1] 25155 s0 := or.Args[0] 25156 if s0.Op != OpAMD64SHLQconst { 25157 break 25158 } 25159 j0 := s0.AuxInt 25160 x0 := s0.Args[0] 25161 if x0.Op != OpAMD64MOVWload { 25162 break 25163 } 25164 i0 := x0.AuxInt 25165 s := x0.Aux 25166 _ = x0.Args[1] 25167 p := x0.Args[0] 25168 mem := x0.Args[1] 25169 y := or.Args[1] 25170 s1 := v.Args[1] 25171 if s1.Op != OpAMD64SHLQconst { 25172 break 25173 } 25174 j1 := s1.AuxInt 25175 x1 := s1.Args[0] 25176 if x1.Op != OpAMD64MOVWload { 25177 break 25178 } 25179 i1 := x1.AuxInt 25180 if x1.Aux != s { 25181 break 25182 } 25183 _ = x1.Args[1] 25184 if p != x1.Args[0] { 25185 break 25186 } 25187 if mem != x1.Args[1] { 25188 break 25189 } 25190 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25191 break 25192 } 25193 b = mergePoint(b, x0, x1) 25194 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25195 v.reset(OpCopy) 25196 v.AddArg(v0) 25197 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25198 v1.AuxInt = j0 25199 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 25200 v2.AuxInt = i0 25201 v2.Aux = s 25202 v2.AddArg(p) 25203 v2.AddArg(mem) 25204 v1.AddArg(v2) 25205 v0.AddArg(v1) 25206 v0.AddArg(y) 25207 return true 25208 } 25209 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 25210 // cond: i1 == i0+2 && j1 == j0+16 && j0 
% 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25211 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 25212 for { 25213 _ = v.Args[1] 25214 or := v.Args[0] 25215 if or.Op != OpAMD64ORQ { 25216 break 25217 } 25218 _ = or.Args[1] 25219 y := or.Args[0] 25220 s0 := or.Args[1] 25221 if s0.Op != OpAMD64SHLQconst { 25222 break 25223 } 25224 j0 := s0.AuxInt 25225 x0 := s0.Args[0] 25226 if x0.Op != OpAMD64MOVWload { 25227 break 25228 } 25229 i0 := x0.AuxInt 25230 s := x0.Aux 25231 _ = x0.Args[1] 25232 p := x0.Args[0] 25233 mem := x0.Args[1] 25234 s1 := v.Args[1] 25235 if s1.Op != OpAMD64SHLQconst { 25236 break 25237 } 25238 j1 := s1.AuxInt 25239 x1 := s1.Args[0] 25240 if x1.Op != OpAMD64MOVWload { 25241 break 25242 } 25243 i1 := x1.AuxInt 25244 if x1.Aux != s { 25245 break 25246 } 25247 _ = x1.Args[1] 25248 if p != x1.Args[0] { 25249 break 25250 } 25251 if mem != x1.Args[1] { 25252 break 25253 } 25254 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25255 break 25256 } 25257 b = mergePoint(b, x0, x1) 25258 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25259 v.reset(OpCopy) 25260 v.AddArg(v0) 25261 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25262 v1.AuxInt = j0 25263 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 25264 v2.AuxInt = i0 25265 v2.Aux = s 25266 v2.AddArg(p) 25267 v2.AddArg(mem) 25268 v1.AddArg(v2) 25269 v0.AddArg(v1) 25270 v0.AddArg(y) 25271 return true 25272 } 25273 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 25274 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25275 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25276 for { 25277 _ = v.Args[1] 25278 x0 := v.Args[0] 25279 if x0.Op != OpAMD64MOVBloadidx1 { 25280 break 25281 } 25282 i0 := x0.AuxInt 25283 s := x0.Aux 25284 _ = x0.Args[2] 25285 p := x0.Args[0] 25286 idx := x0.Args[1] 25287 mem := x0.Args[2] 25288 sh := v.Args[1] 25289 if sh.Op != OpAMD64SHLQconst { 25290 break 25291 } 25292 if sh.AuxInt != 8 { 25293 break 25294 } 25295 x1 := sh.Args[0] 25296 if x1.Op != OpAMD64MOVBloadidx1 { 25297 break 25298 } 25299 i1 := x1.AuxInt 25300 if x1.Aux != s { 25301 break 25302 } 25303 _ = x1.Args[2] 25304 if p != x1.Args[0] { 25305 break 25306 } 25307 if idx != x1.Args[1] { 25308 break 25309 } 25310 if mem != x1.Args[2] { 25311 break 25312 } 25313 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25314 break 25315 } 25316 b = mergePoint(b, x0, x1) 25317 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25318 v.reset(OpCopy) 25319 v.AddArg(v0) 25320 v0.AuxInt = i0 25321 v0.Aux = s 25322 v0.AddArg(p) 25323 v0.AddArg(idx) 25324 v0.AddArg(mem) 25325 return true 25326 } 25327 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 25328 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25329 // result: @mergePoint(b,x0,x1) 
(MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25330 for { 25331 _ = v.Args[1] 25332 x0 := v.Args[0] 25333 if x0.Op != OpAMD64MOVBloadidx1 { 25334 break 25335 } 25336 i0 := x0.AuxInt 25337 s := x0.Aux 25338 _ = x0.Args[2] 25339 idx := x0.Args[0] 25340 p := x0.Args[1] 25341 mem := x0.Args[2] 25342 sh := v.Args[1] 25343 if sh.Op != OpAMD64SHLQconst { 25344 break 25345 } 25346 if sh.AuxInt != 8 { 25347 break 25348 } 25349 x1 := sh.Args[0] 25350 if x1.Op != OpAMD64MOVBloadidx1 { 25351 break 25352 } 25353 i1 := x1.AuxInt 25354 if x1.Aux != s { 25355 break 25356 } 25357 _ = x1.Args[2] 25358 if p != x1.Args[0] { 25359 break 25360 } 25361 if idx != x1.Args[1] { 25362 break 25363 } 25364 if mem != x1.Args[2] { 25365 break 25366 } 25367 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25368 break 25369 } 25370 b = mergePoint(b, x0, x1) 25371 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25372 v.reset(OpCopy) 25373 v.AddArg(v0) 25374 v0.AuxInt = i0 25375 v0.Aux = s 25376 v0.AddArg(p) 25377 v0.AddArg(idx) 25378 v0.AddArg(mem) 25379 return true 25380 } 25381 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 25382 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25383 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25384 for { 25385 _ = v.Args[1] 25386 x0 := v.Args[0] 25387 if x0.Op != OpAMD64MOVBloadidx1 { 25388 break 25389 } 25390 i0 := x0.AuxInt 25391 s := x0.Aux 25392 _ = x0.Args[2] 25393 p := x0.Args[0] 25394 idx := x0.Args[1] 25395 mem := x0.Args[2] 25396 sh := v.Args[1] 25397 if sh.Op != OpAMD64SHLQconst { 25398 break 25399 } 25400 if sh.AuxInt != 8 { 25401 break 25402 } 25403 x1 := sh.Args[0] 25404 if x1.Op != OpAMD64MOVBloadidx1 { 25405 break 25406 } 25407 i1 := x1.AuxInt 25408 if x1.Aux != s { 25409 break 25410 } 25411 _ = x1.Args[2] 25412 if idx != x1.Args[0] { 25413 break 25414 } 25415 if p != x1.Args[1] { 25416 break 25417 } 25418 if mem != x1.Args[2] { 25419 break 25420 } 25421 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25422 break 25423 } 25424 b = mergePoint(b, x0, x1) 25425 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25426 v.reset(OpCopy) 25427 v.AddArg(v0) 25428 v0.AuxInt = i0 25429 v0.Aux = s 25430 v0.AddArg(p) 25431 v0.AddArg(idx) 25432 v0.AddArg(mem) 25433 return true 25434 } 25435 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 25436 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25437 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25438 for { 25439 _ = v.Args[1] 25440 x0 := v.Args[0] 25441 if x0.Op != OpAMD64MOVBloadidx1 { 25442 break 25443 } 25444 i0 := x0.AuxInt 25445 s := x0.Aux 25446 _ = x0.Args[2] 25447 idx := x0.Args[0] 25448 p := x0.Args[1] 25449 mem := x0.Args[2] 25450 sh := v.Args[1] 25451 if sh.Op != OpAMD64SHLQconst { 25452 break 25453 } 25454 if sh.AuxInt != 8 { 25455 break 25456 } 25457 x1 := sh.Args[0] 25458 if x1.Op != OpAMD64MOVBloadidx1 { 25459 break 25460 } 25461 i1 := x1.AuxInt 25462 if x1.Aux != s { 25463 break 25464 } 25465 _ = x1.Args[2] 25466 if idx != x1.Args[0] { 25467 break 25468 } 25469 if 
p != x1.Args[1] { 25470 break 25471 } 25472 if mem != x1.Args[2] { 25473 break 25474 } 25475 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25476 break 25477 } 25478 b = mergePoint(b, x0, x1) 25479 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25480 v.reset(OpCopy) 25481 v.AddArg(v0) 25482 v0.AuxInt = i0 25483 v0.Aux = s 25484 v0.AddArg(p) 25485 v0.AddArg(idx) 25486 v0.AddArg(mem) 25487 return true 25488 } 25489 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 25490 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25491 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25492 for { 25493 _ = v.Args[1] 25494 sh := v.Args[0] 25495 if sh.Op != OpAMD64SHLQconst { 25496 break 25497 } 25498 if sh.AuxInt != 8 { 25499 break 25500 } 25501 x1 := sh.Args[0] 25502 if x1.Op != OpAMD64MOVBloadidx1 { 25503 break 25504 } 25505 i1 := x1.AuxInt 25506 s := x1.Aux 25507 _ = x1.Args[2] 25508 p := x1.Args[0] 25509 idx := x1.Args[1] 25510 mem := x1.Args[2] 25511 x0 := v.Args[1] 25512 if x0.Op != OpAMD64MOVBloadidx1 { 25513 break 25514 } 25515 i0 := x0.AuxInt 25516 if x0.Aux != s { 25517 break 25518 } 25519 _ = x0.Args[2] 25520 if p != x0.Args[0] { 25521 break 25522 } 25523 if idx != x0.Args[1] { 25524 break 25525 } 25526 if mem != x0.Args[2] { 25527 break 25528 } 25529 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25530 break 25531 } 25532 b = mergePoint(b, x0, x1) 25533 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25534 v.reset(OpCopy) 25535 v.AddArg(v0) 25536 v0.AuxInt = i0 25537 v0.Aux = s 25538 v0.AddArg(p) 25539 v0.AddArg(idx) 25540 v0.AddArg(mem) 25541 return true 25542 } 25543 return false 25544 } 25545 func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool { 25546 b := v.Block 25547 _ = b 25548 typ := &b.Func.Config.Types 25549 _ = typ 25550 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 25551 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25552 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25553 for { 25554 _ = v.Args[1] 25555 sh := v.Args[0] 25556 if sh.Op != OpAMD64SHLQconst { 25557 break 25558 } 25559 if sh.AuxInt != 8 { 25560 break 25561 } 25562 x1 := sh.Args[0] 25563 if x1.Op != OpAMD64MOVBloadidx1 { 25564 break 25565 } 25566 i1 := x1.AuxInt 25567 s := x1.Aux 25568 _ = x1.Args[2] 25569 idx := x1.Args[0] 25570 p := x1.Args[1] 25571 mem := x1.Args[2] 25572 x0 := v.Args[1] 25573 if x0.Op != OpAMD64MOVBloadidx1 { 25574 break 25575 } 25576 i0 := x0.AuxInt 25577 if x0.Aux != s { 25578 break 25579 } 25580 _ = x0.Args[2] 25581 if p != x0.Args[0] { 25582 break 25583 } 25584 if idx != x0.Args[1] { 25585 break 25586 } 25587 if mem != x0.Args[2] { 25588 break 25589 } 25590 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25591 break 25592 } 25593 b = mergePoint(b, x0, x1) 25594 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25595 v.reset(OpCopy) 25596 v.AddArg(v0) 25597 v0.AuxInt = i0 25598 v0.Aux = s 25599 v0.AddArg(p) 25600 v0.AddArg(idx) 25601 
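// NOTE: the loadidx1 cases repeat the same merge for indexed addressing
// (base p plus index idx). MOVBloadidx1 is symmetric in p and idx and
// ORQ is commutative, so the rule generator emits one case per operand
// permutation; that is why the rules in _40 and _50 look nearly
// identical apart from the order of p and idx.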
v0.AddArg(mem) 25602 return true 25603 } 25604 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 25605 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25606 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25607 for { 25608 _ = v.Args[1] 25609 sh := v.Args[0] 25610 if sh.Op != OpAMD64SHLQconst { 25611 break 25612 } 25613 if sh.AuxInt != 8 { 25614 break 25615 } 25616 x1 := sh.Args[0] 25617 if x1.Op != OpAMD64MOVBloadidx1 { 25618 break 25619 } 25620 i1 := x1.AuxInt 25621 s := x1.Aux 25622 _ = x1.Args[2] 25623 p := x1.Args[0] 25624 idx := x1.Args[1] 25625 mem := x1.Args[2] 25626 x0 := v.Args[1] 25627 if x0.Op != OpAMD64MOVBloadidx1 { 25628 break 25629 } 25630 i0 := x0.AuxInt 25631 if x0.Aux != s { 25632 break 25633 } 25634 _ = x0.Args[2] 25635 if idx != x0.Args[0] { 25636 break 25637 } 25638 if p != x0.Args[1] { 25639 break 25640 } 25641 if mem != x0.Args[2] { 25642 break 25643 } 25644 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25645 break 25646 } 25647 b = mergePoint(b, x0, x1) 25648 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25649 v.reset(OpCopy) 25650 v.AddArg(v0) 25651 v0.AuxInt = i0 25652 v0.Aux = s 25653 v0.AddArg(p) 25654 v0.AddArg(idx) 25655 v0.AddArg(mem) 25656 return true 25657 } 25658 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 25659 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25660 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25661 for { 25662 _ = v.Args[1] 25663 sh := v.Args[0] 25664 if sh.Op != OpAMD64SHLQconst { 25665 break 25666 } 25667 if sh.AuxInt != 8 { 25668 break 25669 } 25670 x1 := sh.Args[0] 25671 if x1.Op != OpAMD64MOVBloadidx1 { 25672 break 25673 } 25674 i1 := x1.AuxInt 25675 s := x1.Aux 25676 _ = x1.Args[2] 25677 idx := x1.Args[0] 25678 p := x1.Args[1] 25679 mem := x1.Args[2] 25680 x0 := v.Args[1] 25681 if x0.Op != OpAMD64MOVBloadidx1 { 25682 break 25683 } 25684 i0 := x0.AuxInt 25685 if x0.Aux != s { 25686 break 25687 } 25688 _ = x0.Args[2] 25689 if idx != x0.Args[0] { 25690 break 25691 } 25692 if p != x0.Args[1] { 25693 break 25694 } 25695 if mem != x0.Args[2] { 25696 break 25697 } 25698 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25699 break 25700 } 25701 b = mergePoint(b, x0, x1) 25702 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25703 v.reset(OpCopy) 25704 v.AddArg(v0) 25705 v0.AuxInt = i0 25706 v0.Aux = s 25707 v0.AddArg(p) 25708 v0.AddArg(idx) 25709 v0.AddArg(mem) 25710 return true 25711 } 25712 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 25713 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25714 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 25715 for { 25716 _ = v.Args[1] 25717 x0 := v.Args[0] 25718 if x0.Op != OpAMD64MOVWloadidx1 { 25719 break 25720 } 25721 i0 := x0.AuxInt 25722 s := x0.Aux 25723 _ = x0.Args[2] 25724 p := x0.Args[0] 25725 idx := x0.Args[1] 25726 mem := x0.Args[2] 25727 sh := 
v.Args[1] 25728 if sh.Op != OpAMD64SHLQconst { 25729 break 25730 } 25731 if sh.AuxInt != 16 { 25732 break 25733 } 25734 x1 := sh.Args[0] 25735 if x1.Op != OpAMD64MOVWloadidx1 { 25736 break 25737 } 25738 i1 := x1.AuxInt 25739 if x1.Aux != s { 25740 break 25741 } 25742 _ = x1.Args[2] 25743 if p != x1.Args[0] { 25744 break 25745 } 25746 if idx != x1.Args[1] { 25747 break 25748 } 25749 if mem != x1.Args[2] { 25750 break 25751 } 25752 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25753 break 25754 } 25755 b = mergePoint(b, x0, x1) 25756 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 25757 v.reset(OpCopy) 25758 v.AddArg(v0) 25759 v0.AuxInt = i0 25760 v0.Aux = s 25761 v0.AddArg(p) 25762 v0.AddArg(idx) 25763 v0.AddArg(mem) 25764 return true 25765 } 25766 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 25767 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25768 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 25769 for { 25770 _ = v.Args[1] 25771 x0 := v.Args[0] 25772 if x0.Op != OpAMD64MOVWloadidx1 { 25773 break 25774 } 25775 i0 := x0.AuxInt 25776 s := x0.Aux 25777 _ = x0.Args[2] 25778 idx := x0.Args[0] 25779 p := x0.Args[1] 25780 mem := x0.Args[2] 25781 sh := v.Args[1] 25782 if sh.Op != OpAMD64SHLQconst { 25783 break 25784 } 25785 if sh.AuxInt != 16 { 25786 break 25787 } 25788 x1 := sh.Args[0] 25789 if x1.Op != OpAMD64MOVWloadidx1 { 25790 break 25791 } 25792 i1 := x1.AuxInt 25793 if x1.Aux != s { 25794 break 25795 } 25796 _ = x1.Args[2] 25797 if p != x1.Args[0] { 25798 break 25799 } 25800 if idx != x1.Args[1] { 25801 break 25802 } 25803 if mem != x1.Args[2] { 25804 break 25805 } 25806 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25807 break 25808 } 25809 b = mergePoint(b, x0, x1) 25810 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 25811 v.reset(OpCopy) 25812 v.AddArg(v0) 25813 v0.AuxInt = i0 25814 v0.Aux = s 25815 v0.AddArg(p) 25816 v0.AddArg(idx) 25817 v0.AddArg(mem) 25818 return true 25819 } 25820 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 25821 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25822 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 25823 for { 25824 _ = v.Args[1] 25825 x0 := v.Args[0] 25826 if x0.Op != OpAMD64MOVWloadidx1 { 25827 break 25828 } 25829 i0 := x0.AuxInt 25830 s := x0.Aux 25831 _ = x0.Args[2] 25832 p := x0.Args[0] 25833 idx := x0.Args[1] 25834 mem := x0.Args[2] 25835 sh := v.Args[1] 25836 if sh.Op != OpAMD64SHLQconst { 25837 break 25838 } 25839 if sh.AuxInt != 16 { 25840 break 25841 } 25842 x1 := sh.Args[0] 25843 if x1.Op != OpAMD64MOVWloadidx1 { 25844 break 25845 } 25846 i1 := x1.AuxInt 25847 if x1.Aux != s { 25848 break 25849 } 25850 _ = x1.Args[2] 25851 if idx != x1.Args[0] { 25852 break 25853 } 25854 if p != x1.Args[1] { 25855 break 25856 } 25857 if mem != x1.Args[2] { 25858 break 25859 } 25860 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25861 break 25862 } 25863 b = mergePoint(b, x0, x1) 25864 v0 := 
b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 25865 v.reset(OpCopy) 25866 v.AddArg(v0) 25867 v0.AuxInt = i0 25868 v0.Aux = s 25869 v0.AddArg(p) 25870 v0.AddArg(idx) 25871 v0.AddArg(mem) 25872 return true 25873 } 25874 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 25875 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25876 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 25877 for { 25878 _ = v.Args[1] 25879 x0 := v.Args[0] 25880 if x0.Op != OpAMD64MOVWloadidx1 { 25881 break 25882 } 25883 i0 := x0.AuxInt 25884 s := x0.Aux 25885 _ = x0.Args[2] 25886 idx := x0.Args[0] 25887 p := x0.Args[1] 25888 mem := x0.Args[2] 25889 sh := v.Args[1] 25890 if sh.Op != OpAMD64SHLQconst { 25891 break 25892 } 25893 if sh.AuxInt != 16 { 25894 break 25895 } 25896 x1 := sh.Args[0] 25897 if x1.Op != OpAMD64MOVWloadidx1 { 25898 break 25899 } 25900 i1 := x1.AuxInt 25901 if x1.Aux != s { 25902 break 25903 } 25904 _ = x1.Args[2] 25905 if idx != x1.Args[0] { 25906 break 25907 } 25908 if p != x1.Args[1] { 25909 break 25910 } 25911 if mem != x1.Args[2] { 25912 break 25913 } 25914 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25915 break 25916 } 25917 b = mergePoint(b, x0, x1) 25918 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 25919 v.reset(OpCopy) 25920 v.AddArg(v0) 25921 v0.AuxInt = i0 25922 v0.Aux = s 25923 v0.AddArg(p) 25924 v0.AddArg(idx) 25925 v0.AddArg(mem) 25926 return true 25927 } 25928 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 25929 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25930 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 25931 for { 25932 _ = v.Args[1] 25933 sh := v.Args[0] 25934 if sh.Op != OpAMD64SHLQconst { 25935 break 25936 } 25937 if sh.AuxInt != 16 { 25938 break 25939 } 25940 x1 := sh.Args[0] 25941 if x1.Op != OpAMD64MOVWloadidx1 { 25942 break 25943 } 25944 i1 := x1.AuxInt 25945 s := x1.Aux 25946 _ = x1.Args[2] 25947 p := x1.Args[0] 25948 idx := x1.Args[1] 25949 mem := x1.Args[2] 25950 x0 := v.Args[1] 25951 if x0.Op != OpAMD64MOVWloadidx1 { 25952 break 25953 } 25954 i0 := x0.AuxInt 25955 if x0.Aux != s { 25956 break 25957 } 25958 _ = x0.Args[2] 25959 if p != x0.Args[0] { 25960 break 25961 } 25962 if idx != x0.Args[1] { 25963 break 25964 } 25965 if mem != x0.Args[2] { 25966 break 25967 } 25968 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25969 break 25970 } 25971 b = mergePoint(b, x0, x1) 25972 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 25973 v.reset(OpCopy) 25974 v.AddArg(v0) 25975 v0.AuxInt = i0 25976 v0.Aux = s 25977 v0.AddArg(p) 25978 v0.AddArg(idx) 25979 v0.AddArg(mem) 25980 return true 25981 } 25982 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 25983 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25984 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 25985 for { 25986 _ = v.Args[1] 25987 sh := v.Args[0] 25988 if sh.Op != OpAMD64SHLQconst { 
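// NOTE: when one of these merges fires, b = mergePoint(b, x0, x1) moves
// the rewrite to a block where both partial loads are available,
// b.NewValue0 builds the wide load there, and v is reset to an OpCopy
// of that new value so existing uses pick it up unchanged.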
25989 break 25990 } 25991 if sh.AuxInt != 16 { 25992 break 25993 } 25994 x1 := sh.Args[0] 25995 if x1.Op != OpAMD64MOVWloadidx1 { 25996 break 25997 } 25998 i1 := x1.AuxInt 25999 s := x1.Aux 26000 _ = x1.Args[2] 26001 idx := x1.Args[0] 26002 p := x1.Args[1] 26003 mem := x1.Args[2] 26004 x0 := v.Args[1] 26005 if x0.Op != OpAMD64MOVWloadidx1 { 26006 break 26007 } 26008 i0 := x0.AuxInt 26009 if x0.Aux != s { 26010 break 26011 } 26012 _ = x0.Args[2] 26013 if p != x0.Args[0] { 26014 break 26015 } 26016 if idx != x0.Args[1] { 26017 break 26018 } 26019 if mem != x0.Args[2] { 26020 break 26021 } 26022 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26023 break 26024 } 26025 b = mergePoint(b, x0, x1) 26026 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26027 v.reset(OpCopy) 26028 v.AddArg(v0) 26029 v0.AuxInt = i0 26030 v0.Aux = s 26031 v0.AddArg(p) 26032 v0.AddArg(idx) 26033 v0.AddArg(mem) 26034 return true 26035 } 26036 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 26037 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26038 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26039 for { 26040 _ = v.Args[1] 26041 sh := v.Args[0] 26042 if sh.Op != OpAMD64SHLQconst { 26043 break 26044 } 26045 if sh.AuxInt != 16 { 26046 break 26047 } 26048 x1 := sh.Args[0] 26049 if x1.Op != OpAMD64MOVWloadidx1 { 26050 break 26051 } 26052 i1 := x1.AuxInt 26053 s := x1.Aux 26054 _ = x1.Args[2] 26055 p := x1.Args[0] 26056 idx := x1.Args[1] 26057 mem := x1.Args[2] 26058 x0 := v.Args[1] 26059 if x0.Op != OpAMD64MOVWloadidx1 { 26060 break 26061 } 26062 i0 := x0.AuxInt 26063 if x0.Aux != s { 26064 break 26065 } 26066 _ = x0.Args[2] 26067 if idx != x0.Args[0] { 26068 break 26069 } 26070 if p != x0.Args[1] { 26071 break 26072 } 26073 if mem != x0.Args[2] { 26074 break 26075 } 26076 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26077 break 26078 } 26079 b = mergePoint(b, x0, x1) 26080 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26081 v.reset(OpCopy) 26082 v.AddArg(v0) 26083 v0.AuxInt = i0 26084 v0.Aux = s 26085 v0.AddArg(p) 26086 v0.AddArg(idx) 26087 v0.AddArg(mem) 26088 return true 26089 } 26090 return false 26091 } 26092 func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool { 26093 b := v.Block 26094 _ = b 26095 typ := &b.Func.Config.Types 26096 _ = typ 26097 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 26098 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26099 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26100 for { 26101 _ = v.Args[1] 26102 sh := v.Args[0] 26103 if sh.Op != OpAMD64SHLQconst { 26104 break 26105 } 26106 if sh.AuxInt != 16 { 26107 break 26108 } 26109 x1 := sh.Args[0] 26110 if x1.Op != OpAMD64MOVWloadidx1 { 26111 break 26112 } 26113 i1 := x1.AuxInt 26114 s := x1.Aux 26115 _ = x1.Args[2] 26116 idx := x1.Args[0] 26117 p := x1.Args[1] 26118 mem := x1.Args[2] 26119 x0 := v.Args[1] 26120 if x0.Op != OpAMD64MOVWloadidx1 { 26121 break 26122 } 26123 i0 := x0.AuxInt 26124 if x0.Aux != s { 26125 break 26126 } 26127 _ = x0.Args[2] 26128 if idx != x0.Args[0] { 
26129 break 26130 } 26131 if p != x0.Args[1] { 26132 break 26133 } 26134 if mem != x0.Args[2] { 26135 break 26136 } 26137 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26138 break 26139 } 26140 b = mergePoint(b, x0, x1) 26141 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26142 v.reset(OpCopy) 26143 v.AddArg(v0) 26144 v0.AuxInt = i0 26145 v0.Aux = s 26146 v0.AddArg(p) 26147 v0.AddArg(idx) 26148 v0.AddArg(mem) 26149 return true 26150 } 26151 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) 26152 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26153 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26154 for { 26155 _ = v.Args[1] 26156 x0 := v.Args[0] 26157 if x0.Op != OpAMD64MOVLloadidx1 { 26158 break 26159 } 26160 i0 := x0.AuxInt 26161 s := x0.Aux 26162 _ = x0.Args[2] 26163 p := x0.Args[0] 26164 idx := x0.Args[1] 26165 mem := x0.Args[2] 26166 sh := v.Args[1] 26167 if sh.Op != OpAMD64SHLQconst { 26168 break 26169 } 26170 if sh.AuxInt != 32 { 26171 break 26172 } 26173 x1 := sh.Args[0] 26174 if x1.Op != OpAMD64MOVLloadidx1 { 26175 break 26176 } 26177 i1 := x1.AuxInt 26178 if x1.Aux != s { 26179 break 26180 } 26181 _ = x1.Args[2] 26182 if p != x1.Args[0] { 26183 break 26184 } 26185 if idx != x1.Args[1] { 26186 break 26187 } 26188 if mem != x1.Args[2] { 26189 break 26190 } 26191 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26192 break 26193 } 26194 b = mergePoint(b, x0, x1) 26195 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26196 v.reset(OpCopy) 26197 v.AddArg(v0) 26198 v0.AuxInt = i0 26199 v0.Aux = s 26200 v0.AddArg(p) 26201 v0.AddArg(idx) 26202 v0.AddArg(mem) 26203 return true 26204 } 26205 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) 26206 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26207 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26208 for { 26209 _ = v.Args[1] 26210 x0 := v.Args[0] 26211 if x0.Op != OpAMD64MOVLloadidx1 { 26212 break 26213 } 26214 i0 := x0.AuxInt 26215 s := x0.Aux 26216 _ = x0.Args[2] 26217 idx := x0.Args[0] 26218 p := x0.Args[1] 26219 mem := x0.Args[2] 26220 sh := v.Args[1] 26221 if sh.Op != OpAMD64SHLQconst { 26222 break 26223 } 26224 if sh.AuxInt != 32 { 26225 break 26226 } 26227 x1 := sh.Args[0] 26228 if x1.Op != OpAMD64MOVLloadidx1 { 26229 break 26230 } 26231 i1 := x1.AuxInt 26232 if x1.Aux != s { 26233 break 26234 } 26235 _ = x1.Args[2] 26236 if p != x1.Args[0] { 26237 break 26238 } 26239 if idx != x1.Args[1] { 26240 break 26241 } 26242 if mem != x1.Args[2] { 26243 break 26244 } 26245 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26246 break 26247 } 26248 b = mergePoint(b, x0, x1) 26249 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26250 v.reset(OpCopy) 26251 v.AddArg(v0) 26252 v0.AuxInt = i0 26253 v0.Aux = s 26254 v0.AddArg(p) 26255 v0.AddArg(idx) 26256 v0.AddArg(mem) 26257 return true 26258 } 26259 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] 
{s} idx p mem))) 26260 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26261 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26262 for { 26263 _ = v.Args[1] 26264 x0 := v.Args[0] 26265 if x0.Op != OpAMD64MOVLloadidx1 { 26266 break 26267 } 26268 i0 := x0.AuxInt 26269 s := x0.Aux 26270 _ = x0.Args[2] 26271 p := x0.Args[0] 26272 idx := x0.Args[1] 26273 mem := x0.Args[2] 26274 sh := v.Args[1] 26275 if sh.Op != OpAMD64SHLQconst { 26276 break 26277 } 26278 if sh.AuxInt != 32 { 26279 break 26280 } 26281 x1 := sh.Args[0] 26282 if x1.Op != OpAMD64MOVLloadidx1 { 26283 break 26284 } 26285 i1 := x1.AuxInt 26286 if x1.Aux != s { 26287 break 26288 } 26289 _ = x1.Args[2] 26290 if idx != x1.Args[0] { 26291 break 26292 } 26293 if p != x1.Args[1] { 26294 break 26295 } 26296 if mem != x1.Args[2] { 26297 break 26298 } 26299 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26300 break 26301 } 26302 b = mergePoint(b, x0, x1) 26303 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26304 v.reset(OpCopy) 26305 v.AddArg(v0) 26306 v0.AuxInt = i0 26307 v0.Aux = s 26308 v0.AddArg(p) 26309 v0.AddArg(idx) 26310 v0.AddArg(mem) 26311 return true 26312 } 26313 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) 26314 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26315 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26316 for { 26317 _ = v.Args[1] 26318 x0 := v.Args[0] 26319 if x0.Op != OpAMD64MOVLloadidx1 { 26320 break 26321 } 26322 i0 := x0.AuxInt 26323 s := x0.Aux 26324 _ = x0.Args[2] 26325 idx := x0.Args[0] 26326 p := x0.Args[1] 26327 mem := x0.Args[2] 26328 sh := v.Args[1] 26329 if sh.Op != OpAMD64SHLQconst { 26330 break 26331 } 26332 if sh.AuxInt != 32 { 26333 break 26334 } 26335 x1 := sh.Args[0] 26336 if x1.Op != OpAMD64MOVLloadidx1 { 26337 break 26338 } 26339 i1 := x1.AuxInt 26340 if x1.Aux != s { 26341 break 26342 } 26343 _ = x1.Args[2] 26344 if idx != x1.Args[0] { 26345 break 26346 } 26347 if p != x1.Args[1] { 26348 break 26349 } 26350 if mem != x1.Args[2] { 26351 break 26352 } 26353 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26354 break 26355 } 26356 b = mergePoint(b, x0, x1) 26357 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26358 v.reset(OpCopy) 26359 v.AddArg(v0) 26360 v0.AuxInt = i0 26361 v0.Aux = s 26362 v0.AddArg(p) 26363 v0.AddArg(idx) 26364 v0.AddArg(mem) 26365 return true 26366 } 26367 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) 26368 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26369 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26370 for { 26371 _ = v.Args[1] 26372 sh := v.Args[0] 26373 if sh.Op != OpAMD64SHLQconst { 26374 break 26375 } 26376 if sh.AuxInt != 32 { 26377 break 26378 } 26379 x1 := sh.Args[0] 26380 if x1.Op != OpAMD64MOVLloadidx1 { 26381 break 26382 } 26383 i1 := x1.AuxInt 26384 s := x1.Aux 26385 _ = x1.Args[2] 26386 p := x1.Args[0] 26387 idx := x1.Args[1] 26388 mem := x1.Args[2] 26389 x0 := v.Args[1] 26390 if x0.Op 
!= OpAMD64MOVLloadidx1 { 26391 break 26392 } 26393 i0 := x0.AuxInt 26394 if x0.Aux != s { 26395 break 26396 } 26397 _ = x0.Args[2] 26398 if p != x0.Args[0] { 26399 break 26400 } 26401 if idx != x0.Args[1] { 26402 break 26403 } 26404 if mem != x0.Args[2] { 26405 break 26406 } 26407 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26408 break 26409 } 26410 b = mergePoint(b, x0, x1) 26411 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26412 v.reset(OpCopy) 26413 v.AddArg(v0) 26414 v0.AuxInt = i0 26415 v0.Aux = s 26416 v0.AddArg(p) 26417 v0.AddArg(idx) 26418 v0.AddArg(mem) 26419 return true 26420 } 26421 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) 26422 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26423 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26424 for { 26425 _ = v.Args[1] 26426 sh := v.Args[0] 26427 if sh.Op != OpAMD64SHLQconst { 26428 break 26429 } 26430 if sh.AuxInt != 32 { 26431 break 26432 } 26433 x1 := sh.Args[0] 26434 if x1.Op != OpAMD64MOVLloadidx1 { 26435 break 26436 } 26437 i1 := x1.AuxInt 26438 s := x1.Aux 26439 _ = x1.Args[2] 26440 idx := x1.Args[0] 26441 p := x1.Args[1] 26442 mem := x1.Args[2] 26443 x0 := v.Args[1] 26444 if x0.Op != OpAMD64MOVLloadidx1 { 26445 break 26446 } 26447 i0 := x0.AuxInt 26448 if x0.Aux != s { 26449 break 26450 } 26451 _ = x0.Args[2] 26452 if p != x0.Args[0] { 26453 break 26454 } 26455 if idx != x0.Args[1] { 26456 break 26457 } 26458 if mem != x0.Args[2] { 26459 break 26460 } 26461 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26462 break 26463 } 26464 b = mergePoint(b, x0, x1) 26465 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26466 v.reset(OpCopy) 26467 v.AddArg(v0) 26468 v0.AuxInt = i0 26469 v0.Aux = s 26470 v0.AddArg(p) 26471 v0.AddArg(idx) 26472 v0.AddArg(mem) 26473 return true 26474 } 26475 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 26476 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26477 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26478 for { 26479 _ = v.Args[1] 26480 sh := v.Args[0] 26481 if sh.Op != OpAMD64SHLQconst { 26482 break 26483 } 26484 if sh.AuxInt != 32 { 26485 break 26486 } 26487 x1 := sh.Args[0] 26488 if x1.Op != OpAMD64MOVLloadidx1 { 26489 break 26490 } 26491 i1 := x1.AuxInt 26492 s := x1.Aux 26493 _ = x1.Args[2] 26494 p := x1.Args[0] 26495 idx := x1.Args[1] 26496 mem := x1.Args[2] 26497 x0 := v.Args[1] 26498 if x0.Op != OpAMD64MOVLloadidx1 { 26499 break 26500 } 26501 i0 := x0.AuxInt 26502 if x0.Aux != s { 26503 break 26504 } 26505 _ = x0.Args[2] 26506 if idx != x0.Args[0] { 26507 break 26508 } 26509 if p != x0.Args[1] { 26510 break 26511 } 26512 if mem != x0.Args[2] { 26513 break 26514 } 26515 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26516 break 26517 } 26518 b = mergePoint(b, x0, x1) 26519 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26520 v.reset(OpCopy) 26521 v.AddArg(v0) 26522 v0.AuxInt = i0 26523 v0.Aux = s 26524 v0.AddArg(p) 26525 
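// Illustrative note (added for exposition; not part of the generated file):
// this matcher and its neighbors are permutations of one rule. ORQ is
// commutative and MOVLloadidx1 takes its pointer and index operands in
// either order, so the generator emits a separate case for every ordering of:
//
//	(ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem)
//	     sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
//	where i1 == i0+4  =>  @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
//
// The rewrite is valid because AMD64 is little-endian: two adjacent 32-bit
// loads combined as lo | hi<<32 read exactly the bytes of one 64-bit load.
// A sketch of that identity in plain Go (hypothetical buffer b, offset i):
//
//	lo := uint64(binary.LittleEndian.Uint32(b[i:]))   // x0 at offset i0
//	hi := uint64(binary.LittleEndian.Uint32(b[i+4:])) // x1 at offset i0+4
//	v := lo | hi<<32 // equal to binary.LittleEndian.Uint64(b[i:])
//
// The Uses == 1 conditions require that the narrow loads and the shift have
// no other consumers, clobber marks them dead once the wide load replaces
// them, and mergePoint(b, x0, x1) picks a block in which both original
// loads are available, so the fused load can be materialized there.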
v0.AddArg(idx) 26526 v0.AddArg(mem) 26527 return true 26528 } 26529 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 26530 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26531 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26532 for { 26533 _ = v.Args[1] 26534 sh := v.Args[0] 26535 if sh.Op != OpAMD64SHLQconst { 26536 break 26537 } 26538 if sh.AuxInt != 32 { 26539 break 26540 } 26541 x1 := sh.Args[0] 26542 if x1.Op != OpAMD64MOVLloadidx1 { 26543 break 26544 } 26545 i1 := x1.AuxInt 26546 s := x1.Aux 26547 _ = x1.Args[2] 26548 idx := x1.Args[0] 26549 p := x1.Args[1] 26550 mem := x1.Args[2] 26551 x0 := v.Args[1] 26552 if x0.Op != OpAMD64MOVLloadidx1 { 26553 break 26554 } 26555 i0 := x0.AuxInt 26556 if x0.Aux != s { 26557 break 26558 } 26559 _ = x0.Args[2] 26560 if idx != x0.Args[0] { 26561 break 26562 } 26563 if p != x0.Args[1] { 26564 break 26565 } 26566 if mem != x0.Args[2] { 26567 break 26568 } 26569 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26570 break 26571 } 26572 b = mergePoint(b, x0, x1) 26573 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26574 v.reset(OpCopy) 26575 v.AddArg(v0) 26576 v0.AuxInt = i0 26577 v0.Aux = s 26578 v0.AddArg(p) 26579 v0.AddArg(idx) 26580 v0.AddArg(mem) 26581 return true 26582 } 26583 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 26584 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26585 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26586 for { 26587 _ = v.Args[1] 26588 s1 := v.Args[0] 26589 if s1.Op != OpAMD64SHLQconst { 26590 break 26591 } 26592 j1 := s1.AuxInt 26593 x1 := s1.Args[0] 26594 if x1.Op != OpAMD64MOVBloadidx1 { 26595 break 26596 } 26597 i1 := x1.AuxInt 26598 s := x1.Aux 26599 _ = x1.Args[2] 26600 p := x1.Args[0] 26601 idx := x1.Args[1] 26602 mem := x1.Args[2] 26603 or := v.Args[1] 26604 if or.Op != OpAMD64ORQ { 26605 break 26606 } 26607 _ = or.Args[1] 26608 s0 := or.Args[0] 26609 if s0.Op != OpAMD64SHLQconst { 26610 break 26611 } 26612 j0 := s0.AuxInt 26613 x0 := s0.Args[0] 26614 if x0.Op != OpAMD64MOVBloadidx1 { 26615 break 26616 } 26617 i0 := x0.AuxInt 26618 if x0.Aux != s { 26619 break 26620 } 26621 _ = x0.Args[2] 26622 if p != x0.Args[0] { 26623 break 26624 } 26625 if idx != x0.Args[1] { 26626 break 26627 } 26628 if mem != x0.Args[2] { 26629 break 26630 } 26631 y := or.Args[1] 26632 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26633 break 26634 } 26635 b = mergePoint(b, x0, x1) 26636 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26637 v.reset(OpCopy) 26638 v.AddArg(v0) 26639 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26640 v1.AuxInt = j0 26641 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26642 v2.AuxInt = i0 26643 v2.Aux = s 26644 v2.AddArg(p) 26645 v2.AddArg(idx) 26646 v2.AddArg(mem) 26647 v1.AddArg(v2) 26648 
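// Illustrative note (added for exposition; not part of the generated file):
// the rules from here on apply the same fusion inside a larger OR chain.
// Each matcher peels two shifted byte loads out of an (ORQ ... y) tree:
//
//	s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))
//	s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
//	where i1 == i0+1, j1 == j0+8, j0%16 == 0
//
// and replaces the pair with one 16-bit load shifted by j0, leaving the
// remainder y of the chain untouched. In plain Go terms (hypothetical
// buffer b), the pair computes
//
//	uint64(b[i])<<j0 | uint64(b[i+1])<<(j0+8)
//
// which, on little-endian AMD64, equals
// uint64(binary.LittleEndian.Uint16(b[i:]))<<j0.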
v0.AddArg(v1) 26649 v0.AddArg(y) 26650 return true 26651 } 26652 return false 26653 } 26654 func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool { 26655 b := v.Block 26656 _ = b 26657 typ := &b.Func.Config.Types 26658 _ = typ 26659 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 26660 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26661 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26662 for { 26663 _ = v.Args[1] 26664 s1 := v.Args[0] 26665 if s1.Op != OpAMD64SHLQconst { 26666 break 26667 } 26668 j1 := s1.AuxInt 26669 x1 := s1.Args[0] 26670 if x1.Op != OpAMD64MOVBloadidx1 { 26671 break 26672 } 26673 i1 := x1.AuxInt 26674 s := x1.Aux 26675 _ = x1.Args[2] 26676 idx := x1.Args[0] 26677 p := x1.Args[1] 26678 mem := x1.Args[2] 26679 or := v.Args[1] 26680 if or.Op != OpAMD64ORQ { 26681 break 26682 } 26683 _ = or.Args[1] 26684 s0 := or.Args[0] 26685 if s0.Op != OpAMD64SHLQconst { 26686 break 26687 } 26688 j0 := s0.AuxInt 26689 x0 := s0.Args[0] 26690 if x0.Op != OpAMD64MOVBloadidx1 { 26691 break 26692 } 26693 i0 := x0.AuxInt 26694 if x0.Aux != s { 26695 break 26696 } 26697 _ = x0.Args[2] 26698 if p != x0.Args[0] { 26699 break 26700 } 26701 if idx != x0.Args[1] { 26702 break 26703 } 26704 if mem != x0.Args[2] { 26705 break 26706 } 26707 y := or.Args[1] 26708 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26709 break 26710 } 26711 b = mergePoint(b, x0, x1) 26712 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26713 v.reset(OpCopy) 26714 v.AddArg(v0) 26715 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26716 v1.AuxInt = j0 26717 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26718 v2.AuxInt = i0 26719 v2.Aux = s 26720 v2.AddArg(p) 26721 v2.AddArg(idx) 26722 v2.AddArg(mem) 26723 v1.AddArg(v2) 26724 v0.AddArg(v1) 26725 v0.AddArg(y) 26726 return true 26727 } 26728 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 26729 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26730 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26731 for { 26732 _ = v.Args[1] 26733 s1 := v.Args[0] 26734 if s1.Op != OpAMD64SHLQconst { 26735 break 26736 } 26737 j1 := s1.AuxInt 26738 x1 := s1.Args[0] 26739 if x1.Op != OpAMD64MOVBloadidx1 { 26740 break 26741 } 26742 i1 := x1.AuxInt 26743 s := x1.Aux 26744 _ = x1.Args[2] 26745 p := x1.Args[0] 26746 idx := x1.Args[1] 26747 mem := x1.Args[2] 26748 or := v.Args[1] 26749 if or.Op != OpAMD64ORQ { 26750 break 26751 } 26752 _ = or.Args[1] 26753 s0 := or.Args[0] 26754 if s0.Op != OpAMD64SHLQconst { 26755 break 26756 } 26757 j0 := s0.AuxInt 26758 x0 := s0.Args[0] 26759 if x0.Op != OpAMD64MOVBloadidx1 { 26760 break 26761 } 26762 i0 := x0.AuxInt 26763 if x0.Aux != s { 26764 break 26765 } 26766 _ = x0.Args[2] 26767 if idx != 
x0.Args[0] { 26768 break 26769 } 26770 if p != x0.Args[1] { 26771 break 26772 } 26773 if mem != x0.Args[2] { 26774 break 26775 } 26776 y := or.Args[1] 26777 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26778 break 26779 } 26780 b = mergePoint(b, x0, x1) 26781 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26782 v.reset(OpCopy) 26783 v.AddArg(v0) 26784 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26785 v1.AuxInt = j0 26786 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26787 v2.AuxInt = i0 26788 v2.Aux = s 26789 v2.AddArg(p) 26790 v2.AddArg(idx) 26791 v2.AddArg(mem) 26792 v1.AddArg(v2) 26793 v0.AddArg(v1) 26794 v0.AddArg(y) 26795 return true 26796 } 26797 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 26798 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26799 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26800 for { 26801 _ = v.Args[1] 26802 s1 := v.Args[0] 26803 if s1.Op != OpAMD64SHLQconst { 26804 break 26805 } 26806 j1 := s1.AuxInt 26807 x1 := s1.Args[0] 26808 if x1.Op != OpAMD64MOVBloadidx1 { 26809 break 26810 } 26811 i1 := x1.AuxInt 26812 s := x1.Aux 26813 _ = x1.Args[2] 26814 idx := x1.Args[0] 26815 p := x1.Args[1] 26816 mem := x1.Args[2] 26817 or := v.Args[1] 26818 if or.Op != OpAMD64ORQ { 26819 break 26820 } 26821 _ = or.Args[1] 26822 s0 := or.Args[0] 26823 if s0.Op != OpAMD64SHLQconst { 26824 break 26825 } 26826 j0 := s0.AuxInt 26827 x0 := s0.Args[0] 26828 if x0.Op != OpAMD64MOVBloadidx1 { 26829 break 26830 } 26831 i0 := x0.AuxInt 26832 if x0.Aux != s { 26833 break 26834 } 26835 _ = x0.Args[2] 26836 if idx != x0.Args[0] { 26837 break 26838 } 26839 if p != x0.Args[1] { 26840 break 26841 } 26842 if mem != x0.Args[2] { 26843 break 26844 } 26845 y := or.Args[1] 26846 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26847 break 26848 } 26849 b = mergePoint(b, x0, x1) 26850 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26851 v.reset(OpCopy) 26852 v.AddArg(v0) 26853 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26854 v1.AuxInt = j0 26855 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26856 v2.AuxInt = i0 26857 v2.Aux = s 26858 v2.AddArg(p) 26859 v2.AddArg(idx) 26860 v2.AddArg(mem) 26861 v1.AddArg(v2) 26862 v0.AddArg(v1) 26863 v0.AddArg(y) 26864 return true 26865 } 26866 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 26867 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26868 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26869 for { 26870 _ = v.Args[1] 26871 s1 := v.Args[0] 26872 if s1.Op != OpAMD64SHLQconst { 26873 
break 26874 } 26875 j1 := s1.AuxInt 26876 x1 := s1.Args[0] 26877 if x1.Op != OpAMD64MOVBloadidx1 { 26878 break 26879 } 26880 i1 := x1.AuxInt 26881 s := x1.Aux 26882 _ = x1.Args[2] 26883 p := x1.Args[0] 26884 idx := x1.Args[1] 26885 mem := x1.Args[2] 26886 or := v.Args[1] 26887 if or.Op != OpAMD64ORQ { 26888 break 26889 } 26890 _ = or.Args[1] 26891 y := or.Args[0] 26892 s0 := or.Args[1] 26893 if s0.Op != OpAMD64SHLQconst { 26894 break 26895 } 26896 j0 := s0.AuxInt 26897 x0 := s0.Args[0] 26898 if x0.Op != OpAMD64MOVBloadidx1 { 26899 break 26900 } 26901 i0 := x0.AuxInt 26902 if x0.Aux != s { 26903 break 26904 } 26905 _ = x0.Args[2] 26906 if p != x0.Args[0] { 26907 break 26908 } 26909 if idx != x0.Args[1] { 26910 break 26911 } 26912 if mem != x0.Args[2] { 26913 break 26914 } 26915 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26916 break 26917 } 26918 b = mergePoint(b, x0, x1) 26919 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26920 v.reset(OpCopy) 26921 v.AddArg(v0) 26922 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26923 v1.AuxInt = j0 26924 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26925 v2.AuxInt = i0 26926 v2.Aux = s 26927 v2.AddArg(p) 26928 v2.AddArg(idx) 26929 v2.AddArg(mem) 26930 v1.AddArg(v2) 26931 v0.AddArg(v1) 26932 v0.AddArg(y) 26933 return true 26934 } 26935 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 26936 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26937 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26938 for { 26939 _ = v.Args[1] 26940 s1 := v.Args[0] 26941 if s1.Op != OpAMD64SHLQconst { 26942 break 26943 } 26944 j1 := s1.AuxInt 26945 x1 := s1.Args[0] 26946 if x1.Op != OpAMD64MOVBloadidx1 { 26947 break 26948 } 26949 i1 := x1.AuxInt 26950 s := x1.Aux 26951 _ = x1.Args[2] 26952 idx := x1.Args[0] 26953 p := x1.Args[1] 26954 mem := x1.Args[2] 26955 or := v.Args[1] 26956 if or.Op != OpAMD64ORQ { 26957 break 26958 } 26959 _ = or.Args[1] 26960 y := or.Args[0] 26961 s0 := or.Args[1] 26962 if s0.Op != OpAMD64SHLQconst { 26963 break 26964 } 26965 j0 := s0.AuxInt 26966 x0 := s0.Args[0] 26967 if x0.Op != OpAMD64MOVBloadidx1 { 26968 break 26969 } 26970 i0 := x0.AuxInt 26971 if x0.Aux != s { 26972 break 26973 } 26974 _ = x0.Args[2] 26975 if p != x0.Args[0] { 26976 break 26977 } 26978 if idx != x0.Args[1] { 26979 break 26980 } 26981 if mem != x0.Args[2] { 26982 break 26983 } 26984 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26985 break 26986 } 26987 b = mergePoint(b, x0, x1) 26988 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26989 v.reset(OpCopy) 26990 v.AddArg(v0) 26991 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26992 v1.AuxInt = j0 26993 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26994 v2.AuxInt = i0 26995 v2.Aux = s 26996 v2.AddArg(p) 26997 v2.AddArg(idx) 26998 v2.AddArg(mem) 26999 v1.AddArg(v2) 27000 v0.AddArg(v1) 27001 
v0.AddArg(y) 27002 return true 27003 } 27004 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 27005 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27006 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27007 for { 27008 _ = v.Args[1] 27009 s1 := v.Args[0] 27010 if s1.Op != OpAMD64SHLQconst { 27011 break 27012 } 27013 j1 := s1.AuxInt 27014 x1 := s1.Args[0] 27015 if x1.Op != OpAMD64MOVBloadidx1 { 27016 break 27017 } 27018 i1 := x1.AuxInt 27019 s := x1.Aux 27020 _ = x1.Args[2] 27021 p := x1.Args[0] 27022 idx := x1.Args[1] 27023 mem := x1.Args[2] 27024 or := v.Args[1] 27025 if or.Op != OpAMD64ORQ { 27026 break 27027 } 27028 _ = or.Args[1] 27029 y := or.Args[0] 27030 s0 := or.Args[1] 27031 if s0.Op != OpAMD64SHLQconst { 27032 break 27033 } 27034 j0 := s0.AuxInt 27035 x0 := s0.Args[0] 27036 if x0.Op != OpAMD64MOVBloadidx1 { 27037 break 27038 } 27039 i0 := x0.AuxInt 27040 if x0.Aux != s { 27041 break 27042 } 27043 _ = x0.Args[2] 27044 if idx != x0.Args[0] { 27045 break 27046 } 27047 if p != x0.Args[1] { 27048 break 27049 } 27050 if mem != x0.Args[2] { 27051 break 27052 } 27053 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27054 break 27055 } 27056 b = mergePoint(b, x0, x1) 27057 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27058 v.reset(OpCopy) 27059 v.AddArg(v0) 27060 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27061 v1.AuxInt = j0 27062 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27063 v2.AuxInt = i0 27064 v2.Aux = s 27065 v2.AddArg(p) 27066 v2.AddArg(idx) 27067 v2.AddArg(mem) 27068 v1.AddArg(v2) 27069 v0.AddArg(v1) 27070 v0.AddArg(y) 27071 return true 27072 } 27073 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 27074 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27075 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27076 for { 27077 _ = v.Args[1] 27078 s1 := v.Args[0] 27079 if s1.Op != OpAMD64SHLQconst { 27080 break 27081 } 27082 j1 := s1.AuxInt 27083 x1 := s1.Args[0] 27084 if x1.Op != OpAMD64MOVBloadidx1 { 27085 break 27086 } 27087 i1 := x1.AuxInt 27088 s := x1.Aux 27089 _ = x1.Args[2] 27090 idx := x1.Args[0] 27091 p := x1.Args[1] 27092 mem := x1.Args[2] 27093 or := v.Args[1] 27094 if or.Op != OpAMD64ORQ { 27095 break 27096 } 27097 _ = or.Args[1] 27098 y := or.Args[0] 27099 s0 := or.Args[1] 27100 if s0.Op != OpAMD64SHLQconst { 27101 break 27102 } 27103 j0 := s0.AuxInt 27104 x0 := s0.Args[0] 27105 if x0.Op != OpAMD64MOVBloadidx1 { 27106 break 27107 } 27108 i0 := x0.AuxInt 27109 if x0.Aux != s { 27110 break 27111 } 27112 _ = x0.Args[2] 27113 if idx != x0.Args[0] { 27114 break 27115 } 27116 if p != x0.Args[1] { 27117 break 27118 } 27119 if mem != x0.Args[2] { 27120 break 27121 } 27122 if !(i1 == i0+1 && j1 == j0+8 && 
j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27123 break 27124 } 27125 b = mergePoint(b, x0, x1) 27126 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27127 v.reset(OpCopy) 27128 v.AddArg(v0) 27129 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27130 v1.AuxInt = j0 27131 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27132 v2.AuxInt = i0 27133 v2.Aux = s 27134 v2.AddArg(p) 27135 v2.AddArg(idx) 27136 v2.AddArg(mem) 27137 v1.AddArg(v2) 27138 v0.AddArg(v1) 27139 v0.AddArg(y) 27140 return true 27141 } 27142 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27143 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27144 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27145 for { 27146 _ = v.Args[1] 27147 or := v.Args[0] 27148 if or.Op != OpAMD64ORQ { 27149 break 27150 } 27151 _ = or.Args[1] 27152 s0 := or.Args[0] 27153 if s0.Op != OpAMD64SHLQconst { 27154 break 27155 } 27156 j0 := s0.AuxInt 27157 x0 := s0.Args[0] 27158 if x0.Op != OpAMD64MOVBloadidx1 { 27159 break 27160 } 27161 i0 := x0.AuxInt 27162 s := x0.Aux 27163 _ = x0.Args[2] 27164 p := x0.Args[0] 27165 idx := x0.Args[1] 27166 mem := x0.Args[2] 27167 y := or.Args[1] 27168 s1 := v.Args[1] 27169 if s1.Op != OpAMD64SHLQconst { 27170 break 27171 } 27172 j1 := s1.AuxInt 27173 x1 := s1.Args[0] 27174 if x1.Op != OpAMD64MOVBloadidx1 { 27175 break 27176 } 27177 i1 := x1.AuxInt 27178 if x1.Aux != s { 27179 break 27180 } 27181 _ = x1.Args[2] 27182 if p != x1.Args[0] { 27183 break 27184 } 27185 if idx != x1.Args[1] { 27186 break 27187 } 27188 if mem != x1.Args[2] { 27189 break 27190 } 27191 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27192 break 27193 } 27194 b = mergePoint(b, x0, x1) 27195 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27196 v.reset(OpCopy) 27197 v.AddArg(v0) 27198 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27199 v1.AuxInt = j0 27200 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27201 v2.AuxInt = i0 27202 v2.Aux = s 27203 v2.AddArg(p) 27204 v2.AddArg(idx) 27205 v2.AddArg(mem) 27206 v1.AddArg(v2) 27207 v0.AddArg(v1) 27208 v0.AddArg(y) 27209 return true 27210 } 27211 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27212 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27213 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27214 for { 27215 _ = v.Args[1] 27216 or := v.Args[0] 27217 if or.Op != OpAMD64ORQ { 27218 break 27219 } 27220 _ = or.Args[1] 27221 s0 := or.Args[0] 27222 if s0.Op != OpAMD64SHLQconst { 27223 break 27224 } 27225 j0 := s0.AuxInt 27226 x0 := s0.Args[0] 27227 if x0.Op != OpAMD64MOVBloadidx1 
{ 27228 break 27229 } 27230 i0 := x0.AuxInt 27231 s := x0.Aux 27232 _ = x0.Args[2] 27233 idx := x0.Args[0] 27234 p := x0.Args[1] 27235 mem := x0.Args[2] 27236 y := or.Args[1] 27237 s1 := v.Args[1] 27238 if s1.Op != OpAMD64SHLQconst { 27239 break 27240 } 27241 j1 := s1.AuxInt 27242 x1 := s1.Args[0] 27243 if x1.Op != OpAMD64MOVBloadidx1 { 27244 break 27245 } 27246 i1 := x1.AuxInt 27247 if x1.Aux != s { 27248 break 27249 } 27250 _ = x1.Args[2] 27251 if p != x1.Args[0] { 27252 break 27253 } 27254 if idx != x1.Args[1] { 27255 break 27256 } 27257 if mem != x1.Args[2] { 27258 break 27259 } 27260 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27261 break 27262 } 27263 b = mergePoint(b, x0, x1) 27264 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27265 v.reset(OpCopy) 27266 v.AddArg(v0) 27267 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27268 v1.AuxInt = j0 27269 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27270 v2.AuxInt = i0 27271 v2.Aux = s 27272 v2.AddArg(p) 27273 v2.AddArg(idx) 27274 v2.AddArg(mem) 27275 v1.AddArg(v2) 27276 v0.AddArg(v1) 27277 v0.AddArg(y) 27278 return true 27279 } 27280 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27281 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27282 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27283 for { 27284 _ = v.Args[1] 27285 or := v.Args[0] 27286 if or.Op != OpAMD64ORQ { 27287 break 27288 } 27289 _ = or.Args[1] 27290 y := or.Args[0] 27291 s0 := or.Args[1] 27292 if s0.Op != OpAMD64SHLQconst { 27293 break 27294 } 27295 j0 := s0.AuxInt 27296 x0 := s0.Args[0] 27297 if x0.Op != OpAMD64MOVBloadidx1 { 27298 break 27299 } 27300 i0 := x0.AuxInt 27301 s := x0.Aux 27302 _ = x0.Args[2] 27303 p := x0.Args[0] 27304 idx := x0.Args[1] 27305 mem := x0.Args[2] 27306 s1 := v.Args[1] 27307 if s1.Op != OpAMD64SHLQconst { 27308 break 27309 } 27310 j1 := s1.AuxInt 27311 x1 := s1.Args[0] 27312 if x1.Op != OpAMD64MOVBloadidx1 { 27313 break 27314 } 27315 i1 := x1.AuxInt 27316 if x1.Aux != s { 27317 break 27318 } 27319 _ = x1.Args[2] 27320 if p != x1.Args[0] { 27321 break 27322 } 27323 if idx != x1.Args[1] { 27324 break 27325 } 27326 if mem != x1.Args[2] { 27327 break 27328 } 27329 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27330 break 27331 } 27332 b = mergePoint(b, x0, x1) 27333 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27334 v.reset(OpCopy) 27335 v.AddArg(v0) 27336 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27337 v1.AuxInt = j0 27338 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27339 v2.AuxInt = i0 27340 v2.Aux = s 27341 v2.AddArg(p) 27342 v2.AddArg(idx) 27343 v2.AddArg(mem) 27344 v1.AddArg(v2) 27345 v0.AddArg(v1) 27346 v0.AddArg(y) 27347 return true 27348 } 27349 return false 27350 } 27351 func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool { 27352 b := v.Block 27353 _ = b 27354 typ := &b.Func.Config.Types 27355 _ = 
typ 27356 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27357 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27358 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27359 for { 27360 _ = v.Args[1] 27361 or := v.Args[0] 27362 if or.Op != OpAMD64ORQ { 27363 break 27364 } 27365 _ = or.Args[1] 27366 y := or.Args[0] 27367 s0 := or.Args[1] 27368 if s0.Op != OpAMD64SHLQconst { 27369 break 27370 } 27371 j0 := s0.AuxInt 27372 x0 := s0.Args[0] 27373 if x0.Op != OpAMD64MOVBloadidx1 { 27374 break 27375 } 27376 i0 := x0.AuxInt 27377 s := x0.Aux 27378 _ = x0.Args[2] 27379 idx := x0.Args[0] 27380 p := x0.Args[1] 27381 mem := x0.Args[2] 27382 s1 := v.Args[1] 27383 if s1.Op != OpAMD64SHLQconst { 27384 break 27385 } 27386 j1 := s1.AuxInt 27387 x1 := s1.Args[0] 27388 if x1.Op != OpAMD64MOVBloadidx1 { 27389 break 27390 } 27391 i1 := x1.AuxInt 27392 if x1.Aux != s { 27393 break 27394 } 27395 _ = x1.Args[2] 27396 if p != x1.Args[0] { 27397 break 27398 } 27399 if idx != x1.Args[1] { 27400 break 27401 } 27402 if mem != x1.Args[2] { 27403 break 27404 } 27405 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27406 break 27407 } 27408 b = mergePoint(b, x0, x1) 27409 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27410 v.reset(OpCopy) 27411 v.AddArg(v0) 27412 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27413 v1.AuxInt = j0 27414 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27415 v2.AuxInt = i0 27416 v2.Aux = s 27417 v2.AddArg(p) 27418 v2.AddArg(idx) 27419 v2.AddArg(mem) 27420 v1.AddArg(v2) 27421 v0.AddArg(v1) 27422 v0.AddArg(y) 27423 return true 27424 } 27425 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27426 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27427 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27428 for { 27429 _ = v.Args[1] 27430 or := v.Args[0] 27431 if or.Op != OpAMD64ORQ { 27432 break 27433 } 27434 _ = or.Args[1] 27435 s0 := or.Args[0] 27436 if s0.Op != OpAMD64SHLQconst { 27437 break 27438 } 27439 j0 := s0.AuxInt 27440 x0 := s0.Args[0] 27441 if x0.Op != OpAMD64MOVBloadidx1 { 27442 break 27443 } 27444 i0 := x0.AuxInt 27445 s := x0.Aux 27446 _ = x0.Args[2] 27447 p := x0.Args[0] 27448 idx := x0.Args[1] 27449 mem := x0.Args[2] 27450 y := or.Args[1] 27451 s1 := v.Args[1] 27452 if s1.Op != OpAMD64SHLQconst { 27453 break 27454 } 27455 j1 := s1.AuxInt 27456 x1 := s1.Args[0] 27457 if x1.Op != OpAMD64MOVBloadidx1 { 27458 break 27459 } 27460 i1 := x1.AuxInt 27461 if x1.Aux != s { 27462 break 27463 } 27464 _ = x1.Args[2] 27465 if idx != x1.Args[0] { 27466 break 27467 } 27468 if p != x1.Args[1] { 27469 break 27470 } 27471 if mem != x1.Args[2] { 27472 break 27473 } 27474 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses 
== 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27475 break 27476 } 27477 b = mergePoint(b, x0, x1) 27478 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27479 v.reset(OpCopy) 27480 v.AddArg(v0) 27481 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27482 v1.AuxInt = j0 27483 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27484 v2.AuxInt = i0 27485 v2.Aux = s 27486 v2.AddArg(p) 27487 v2.AddArg(idx) 27488 v2.AddArg(mem) 27489 v1.AddArg(v2) 27490 v0.AddArg(v1) 27491 v0.AddArg(y) 27492 return true 27493 } 27494 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27495 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27496 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27497 for { 27498 _ = v.Args[1] 27499 or := v.Args[0] 27500 if or.Op != OpAMD64ORQ { 27501 break 27502 } 27503 _ = or.Args[1] 27504 s0 := or.Args[0] 27505 if s0.Op != OpAMD64SHLQconst { 27506 break 27507 } 27508 j0 := s0.AuxInt 27509 x0 := s0.Args[0] 27510 if x0.Op != OpAMD64MOVBloadidx1 { 27511 break 27512 } 27513 i0 := x0.AuxInt 27514 s := x0.Aux 27515 _ = x0.Args[2] 27516 idx := x0.Args[0] 27517 p := x0.Args[1] 27518 mem := x0.Args[2] 27519 y := or.Args[1] 27520 s1 := v.Args[1] 27521 if s1.Op != OpAMD64SHLQconst { 27522 break 27523 } 27524 j1 := s1.AuxInt 27525 x1 := s1.Args[0] 27526 if x1.Op != OpAMD64MOVBloadidx1 { 27527 break 27528 } 27529 i1 := x1.AuxInt 27530 if x1.Aux != s { 27531 break 27532 } 27533 _ = x1.Args[2] 27534 if idx != x1.Args[0] { 27535 break 27536 } 27537 if p != x1.Args[1] { 27538 break 27539 } 27540 if mem != x1.Args[2] { 27541 break 27542 } 27543 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27544 break 27545 } 27546 b = mergePoint(b, x0, x1) 27547 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27548 v.reset(OpCopy) 27549 v.AddArg(v0) 27550 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27551 v1.AuxInt = j0 27552 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27553 v2.AuxInt = i0 27554 v2.Aux = s 27555 v2.AddArg(p) 27556 v2.AddArg(idx) 27557 v2.AddArg(mem) 27558 v1.AddArg(v2) 27559 v0.AddArg(v1) 27560 v0.AddArg(y) 27561 return true 27562 } 27563 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27564 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27565 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27566 for { 27567 _ = v.Args[1] 27568 or := v.Args[0] 27569 if or.Op != OpAMD64ORQ { 27570 break 27571 } 27572 _ = or.Args[1] 27573 y := or.Args[0] 27574 s0 := or.Args[1] 27575 if s0.Op != OpAMD64SHLQconst { 27576 break 27577 } 27578 j0 := s0.AuxInt 27579 x0 := s0.Args[0] 27580 if x0.Op != OpAMD64MOVBloadidx1 { 27581 break 
27582 } 27583 i0 := x0.AuxInt 27584 s := x0.Aux 27585 _ = x0.Args[2] 27586 p := x0.Args[0] 27587 idx := x0.Args[1] 27588 mem := x0.Args[2] 27589 s1 := v.Args[1] 27590 if s1.Op != OpAMD64SHLQconst { 27591 break 27592 } 27593 j1 := s1.AuxInt 27594 x1 := s1.Args[0] 27595 if x1.Op != OpAMD64MOVBloadidx1 { 27596 break 27597 } 27598 i1 := x1.AuxInt 27599 if x1.Aux != s { 27600 break 27601 } 27602 _ = x1.Args[2] 27603 if idx != x1.Args[0] { 27604 break 27605 } 27606 if p != x1.Args[1] { 27607 break 27608 } 27609 if mem != x1.Args[2] { 27610 break 27611 } 27612 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27613 break 27614 } 27615 b = mergePoint(b, x0, x1) 27616 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27617 v.reset(OpCopy) 27618 v.AddArg(v0) 27619 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27620 v1.AuxInt = j0 27621 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27622 v2.AuxInt = i0 27623 v2.Aux = s 27624 v2.AddArg(p) 27625 v2.AddArg(idx) 27626 v2.AddArg(mem) 27627 v1.AddArg(v2) 27628 v0.AddArg(v1) 27629 v0.AddArg(y) 27630 return true 27631 } 27632 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27633 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27634 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27635 for { 27636 _ = v.Args[1] 27637 or := v.Args[0] 27638 if or.Op != OpAMD64ORQ { 27639 break 27640 } 27641 _ = or.Args[1] 27642 y := or.Args[0] 27643 s0 := or.Args[1] 27644 if s0.Op != OpAMD64SHLQconst { 27645 break 27646 } 27647 j0 := s0.AuxInt 27648 x0 := s0.Args[0] 27649 if x0.Op != OpAMD64MOVBloadidx1 { 27650 break 27651 } 27652 i0 := x0.AuxInt 27653 s := x0.Aux 27654 _ = x0.Args[2] 27655 idx := x0.Args[0] 27656 p := x0.Args[1] 27657 mem := x0.Args[2] 27658 s1 := v.Args[1] 27659 if s1.Op != OpAMD64SHLQconst { 27660 break 27661 } 27662 j1 := s1.AuxInt 27663 x1 := s1.Args[0] 27664 if x1.Op != OpAMD64MOVBloadidx1 { 27665 break 27666 } 27667 i1 := x1.AuxInt 27668 if x1.Aux != s { 27669 break 27670 } 27671 _ = x1.Args[2] 27672 if idx != x1.Args[0] { 27673 break 27674 } 27675 if p != x1.Args[1] { 27676 break 27677 } 27678 if mem != x1.Args[2] { 27679 break 27680 } 27681 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27682 break 27683 } 27684 b = mergePoint(b, x0, x1) 27685 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27686 v.reset(OpCopy) 27687 v.AddArg(v0) 27688 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27689 v1.AuxInt = j0 27690 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27691 v2.AuxInt = i0 27692 v2.Aux = s 27693 v2.AddArg(p) 27694 v2.AddArg(idx) 27695 v2.AddArg(mem) 27696 v1.AddArg(v2) 27697 v0.AddArg(v1) 27698 v0.AddArg(y) 27699 return true 27700 } 27701 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 27702 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 
== 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27703 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27704 for { 27705 _ = v.Args[1] 27706 s1 := v.Args[0] 27707 if s1.Op != OpAMD64SHLQconst { 27708 break 27709 } 27710 j1 := s1.AuxInt 27711 x1 := s1.Args[0] 27712 if x1.Op != OpAMD64MOVWloadidx1 { 27713 break 27714 } 27715 i1 := x1.AuxInt 27716 s := x1.Aux 27717 _ = x1.Args[2] 27718 p := x1.Args[0] 27719 idx := x1.Args[1] 27720 mem := x1.Args[2] 27721 or := v.Args[1] 27722 if or.Op != OpAMD64ORQ { 27723 break 27724 } 27725 _ = or.Args[1] 27726 s0 := or.Args[0] 27727 if s0.Op != OpAMD64SHLQconst { 27728 break 27729 } 27730 j0 := s0.AuxInt 27731 x0 := s0.Args[0] 27732 if x0.Op != OpAMD64MOVWloadidx1 { 27733 break 27734 } 27735 i0 := x0.AuxInt 27736 if x0.Aux != s { 27737 break 27738 } 27739 _ = x0.Args[2] 27740 if p != x0.Args[0] { 27741 break 27742 } 27743 if idx != x0.Args[1] { 27744 break 27745 } 27746 if mem != x0.Args[2] { 27747 break 27748 } 27749 y := or.Args[1] 27750 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27751 break 27752 } 27753 b = mergePoint(b, x0, x1) 27754 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27755 v.reset(OpCopy) 27756 v.AddArg(v0) 27757 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27758 v1.AuxInt = j0 27759 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27760 v2.AuxInt = i0 27761 v2.Aux = s 27762 v2.AddArg(p) 27763 v2.AddArg(idx) 27764 v2.AddArg(mem) 27765 v1.AddArg(v2) 27766 v0.AddArg(v1) 27767 v0.AddArg(y) 27768 return true 27769 } 27770 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 27771 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27772 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27773 for { 27774 _ = v.Args[1] 27775 s1 := v.Args[0] 27776 if s1.Op != OpAMD64SHLQconst { 27777 break 27778 } 27779 j1 := s1.AuxInt 27780 x1 := s1.Args[0] 27781 if x1.Op != OpAMD64MOVWloadidx1 { 27782 break 27783 } 27784 i1 := x1.AuxInt 27785 s := x1.Aux 27786 _ = x1.Args[2] 27787 idx := x1.Args[0] 27788 p := x1.Args[1] 27789 mem := x1.Args[2] 27790 or := v.Args[1] 27791 if or.Op != OpAMD64ORQ { 27792 break 27793 } 27794 _ = or.Args[1] 27795 s0 := or.Args[0] 27796 if s0.Op != OpAMD64SHLQconst { 27797 break 27798 } 27799 j0 := s0.AuxInt 27800 x0 := s0.Args[0] 27801 if x0.Op != OpAMD64MOVWloadidx1 { 27802 break 27803 } 27804 i0 := x0.AuxInt 27805 if x0.Aux != s { 27806 break 27807 } 27808 _ = x0.Args[2] 27809 if p != x0.Args[0] { 27810 break 27811 } 27812 if idx != x0.Args[1] { 27813 break 27814 } 27815 if mem != x0.Args[2] { 27816 break 27817 } 27818 y := or.Args[1] 27819 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27820 break 27821 } 27822 b = 
mergePoint(b, x0, x1) 27823 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27824 v.reset(OpCopy) 27825 v.AddArg(v0) 27826 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27827 v1.AuxInt = j0 27828 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27829 v2.AuxInt = i0 27830 v2.Aux = s 27831 v2.AddArg(p) 27832 v2.AddArg(idx) 27833 v2.AddArg(mem) 27834 v1.AddArg(v2) 27835 v0.AddArg(v1) 27836 v0.AddArg(y) 27837 return true 27838 } 27839 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 27840 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27841 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27842 for { 27843 _ = v.Args[1] 27844 s1 := v.Args[0] 27845 if s1.Op != OpAMD64SHLQconst { 27846 break 27847 } 27848 j1 := s1.AuxInt 27849 x1 := s1.Args[0] 27850 if x1.Op != OpAMD64MOVWloadidx1 { 27851 break 27852 } 27853 i1 := x1.AuxInt 27854 s := x1.Aux 27855 _ = x1.Args[2] 27856 p := x1.Args[0] 27857 idx := x1.Args[1] 27858 mem := x1.Args[2] 27859 or := v.Args[1] 27860 if or.Op != OpAMD64ORQ { 27861 break 27862 } 27863 _ = or.Args[1] 27864 s0 := or.Args[0] 27865 if s0.Op != OpAMD64SHLQconst { 27866 break 27867 } 27868 j0 := s0.AuxInt 27869 x0 := s0.Args[0] 27870 if x0.Op != OpAMD64MOVWloadidx1 { 27871 break 27872 } 27873 i0 := x0.AuxInt 27874 if x0.Aux != s { 27875 break 27876 } 27877 _ = x0.Args[2] 27878 if idx != x0.Args[0] { 27879 break 27880 } 27881 if p != x0.Args[1] { 27882 break 27883 } 27884 if mem != x0.Args[2] { 27885 break 27886 } 27887 y := or.Args[1] 27888 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27889 break 27890 } 27891 b = mergePoint(b, x0, x1) 27892 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27893 v.reset(OpCopy) 27894 v.AddArg(v0) 27895 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27896 v1.AuxInt = j0 27897 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27898 v2.AuxInt = i0 27899 v2.Aux = s 27900 v2.AddArg(p) 27901 v2.AddArg(idx) 27902 v2.AddArg(mem) 27903 v1.AddArg(v2) 27904 v0.AddArg(v1) 27905 v0.AddArg(y) 27906 return true 27907 } 27908 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 27909 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27910 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27911 for { 27912 _ = v.Args[1] 27913 s1 := v.Args[0] 27914 if s1.Op != OpAMD64SHLQconst { 27915 break 27916 } 27917 j1 := s1.AuxInt 27918 x1 := s1.Args[0] 27919 if x1.Op != OpAMD64MOVWloadidx1 { 27920 break 27921 } 27922 i1 := x1.AuxInt 27923 s := x1.Aux 27924 _ = x1.Args[2] 27925 idx := x1.Args[0] 27926 p := x1.Args[1] 27927 mem := x1.Args[2] 27928 or := v.Args[1] 27929 if or.Op != OpAMD64ORQ { 27930 break 27931 } 27932 _ = or.Args[1] 27933 s0 := or.Args[0] 27934 if s0.Op != OpAMD64SHLQconst { 27935 break 
27936 } 27937 j0 := s0.AuxInt 27938 x0 := s0.Args[0] 27939 if x0.Op != OpAMD64MOVWloadidx1 { 27940 break 27941 } 27942 i0 := x0.AuxInt 27943 if x0.Aux != s { 27944 break 27945 } 27946 _ = x0.Args[2] 27947 if idx != x0.Args[0] { 27948 break 27949 } 27950 if p != x0.Args[1] { 27951 break 27952 } 27953 if mem != x0.Args[2] { 27954 break 27955 } 27956 y := or.Args[1] 27957 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27958 break 27959 } 27960 b = mergePoint(b, x0, x1) 27961 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27962 v.reset(OpCopy) 27963 v.AddArg(v0) 27964 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27965 v1.AuxInt = j0 27966 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27967 v2.AuxInt = i0 27968 v2.Aux = s 27969 v2.AddArg(p) 27970 v2.AddArg(idx) 27971 v2.AddArg(mem) 27972 v1.AddArg(v2) 27973 v0.AddArg(v1) 27974 v0.AddArg(y) 27975 return true 27976 } 27977 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 27978 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27979 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27980 for { 27981 _ = v.Args[1] 27982 s1 := v.Args[0] 27983 if s1.Op != OpAMD64SHLQconst { 27984 break 27985 } 27986 j1 := s1.AuxInt 27987 x1 := s1.Args[0] 27988 if x1.Op != OpAMD64MOVWloadidx1 { 27989 break 27990 } 27991 i1 := x1.AuxInt 27992 s := x1.Aux 27993 _ = x1.Args[2] 27994 p := x1.Args[0] 27995 idx := x1.Args[1] 27996 mem := x1.Args[2] 27997 or := v.Args[1] 27998 if or.Op != OpAMD64ORQ { 27999 break 28000 } 28001 _ = or.Args[1] 28002 y := or.Args[0] 28003 s0 := or.Args[1] 28004 if s0.Op != OpAMD64SHLQconst { 28005 break 28006 } 28007 j0 := s0.AuxInt 28008 x0 := s0.Args[0] 28009 if x0.Op != OpAMD64MOVWloadidx1 { 28010 break 28011 } 28012 i0 := x0.AuxInt 28013 if x0.Aux != s { 28014 break 28015 } 28016 _ = x0.Args[2] 28017 if p != x0.Args[0] { 28018 break 28019 } 28020 if idx != x0.Args[1] { 28021 break 28022 } 28023 if mem != x0.Args[2] { 28024 break 28025 } 28026 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28027 break 28028 } 28029 b = mergePoint(b, x0, x1) 28030 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28031 v.reset(OpCopy) 28032 v.AddArg(v0) 28033 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28034 v1.AuxInt = j0 28035 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28036 v2.AuxInt = i0 28037 v2.Aux = s 28038 v2.AddArg(p) 28039 v2.AddArg(idx) 28040 v2.AddArg(mem) 28041 v1.AddArg(v2) 28042 v0.AddArg(v1) 28043 v0.AddArg(y) 28044 return true 28045 } 28046 return false 28047 } 28048 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { 28049 b := v.Block 28050 _ = b 28051 typ := &b.Func.Config.Types 28052 _ = typ 28053 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28054 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 
1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28055 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28056 for { 28057 _ = v.Args[1] 28058 s1 := v.Args[0] 28059 if s1.Op != OpAMD64SHLQconst { 28060 break 28061 } 28062 j1 := s1.AuxInt 28063 x1 := s1.Args[0] 28064 if x1.Op != OpAMD64MOVWloadidx1 { 28065 break 28066 } 28067 i1 := x1.AuxInt 28068 s := x1.Aux 28069 _ = x1.Args[2] 28070 idx := x1.Args[0] 28071 p := x1.Args[1] 28072 mem := x1.Args[2] 28073 or := v.Args[1] 28074 if or.Op != OpAMD64ORQ { 28075 break 28076 } 28077 _ = or.Args[1] 28078 y := or.Args[0] 28079 s0 := or.Args[1] 28080 if s0.Op != OpAMD64SHLQconst { 28081 break 28082 } 28083 j0 := s0.AuxInt 28084 x0 := s0.Args[0] 28085 if x0.Op != OpAMD64MOVWloadidx1 { 28086 break 28087 } 28088 i0 := x0.AuxInt 28089 if x0.Aux != s { 28090 break 28091 } 28092 _ = x0.Args[2] 28093 if p != x0.Args[0] { 28094 break 28095 } 28096 if idx != x0.Args[1] { 28097 break 28098 } 28099 if mem != x0.Args[2] { 28100 break 28101 } 28102 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28103 break 28104 } 28105 b = mergePoint(b, x0, x1) 28106 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28107 v.reset(OpCopy) 28108 v.AddArg(v0) 28109 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28110 v1.AuxInt = j0 28111 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28112 v2.AuxInt = i0 28113 v2.Aux = s 28114 v2.AddArg(p) 28115 v2.AddArg(idx) 28116 v2.AddArg(mem) 28117 v1.AddArg(v2) 28118 v0.AddArg(v1) 28119 v0.AddArg(y) 28120 return true 28121 } 28122 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28123 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28124 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28125 for { 28126 _ = v.Args[1] 28127 s1 := v.Args[0] 28128 if s1.Op != OpAMD64SHLQconst { 28129 break 28130 } 28131 j1 := s1.AuxInt 28132 x1 := s1.Args[0] 28133 if x1.Op != OpAMD64MOVWloadidx1 { 28134 break 28135 } 28136 i1 := x1.AuxInt 28137 s := x1.Aux 28138 _ = x1.Args[2] 28139 p := x1.Args[0] 28140 idx := x1.Args[1] 28141 mem := x1.Args[2] 28142 or := v.Args[1] 28143 if or.Op != OpAMD64ORQ { 28144 break 28145 } 28146 _ = or.Args[1] 28147 y := or.Args[0] 28148 s0 := or.Args[1] 28149 if s0.Op != OpAMD64SHLQconst { 28150 break 28151 } 28152 j0 := s0.AuxInt 28153 x0 := s0.Args[0] 28154 if x0.Op != OpAMD64MOVWloadidx1 { 28155 break 28156 } 28157 i0 := x0.AuxInt 28158 if x0.Aux != s { 28159 break 28160 } 28161 _ = x0.Args[2] 28162 if idx != x0.Args[0] { 28163 break 28164 } 28165 if p != x0.Args[1] { 28166 break 28167 } 28168 if mem != x0.Args[2] { 28169 break 28170 } 28171 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28172 break 28173 } 28174 b = mergePoint(b, x0, x1) 
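// Illustrative note (added for exposition; not part of the generated file):
// same ladder, one rung up: two 16-bit loads at offsets i0 and i0+2,
// shifted by j0 and j0+16 with j0%32 == 0, fuse into a single MOVLloadidx1
// shifted by j0. Chained with the byte-level rules above and the
// MOVLloadidx1-to-MOVQloadidx1 rules earlier, byte-at-a-time decoding such as
//
//	uint64(b[0]) | uint64(b[1])<<8 | ... | uint64(b[7])<<56
//
// collapses bottom-up into one 64-bit load: bytes pair into words, words
// into longwords, and longwords into a quadword.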
28175 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28176 v.reset(OpCopy) 28177 v.AddArg(v0) 28178 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28179 v1.AuxInt = j0 28180 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28181 v2.AuxInt = i0 28182 v2.Aux = s 28183 v2.AddArg(p) 28184 v2.AddArg(idx) 28185 v2.AddArg(mem) 28186 v1.AddArg(v2) 28187 v0.AddArg(v1) 28188 v0.AddArg(y) 28189 return true 28190 } 28191 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28192 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28193 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28194 for { 28195 _ = v.Args[1] 28196 s1 := v.Args[0] 28197 if s1.Op != OpAMD64SHLQconst { 28198 break 28199 } 28200 j1 := s1.AuxInt 28201 x1 := s1.Args[0] 28202 if x1.Op != OpAMD64MOVWloadidx1 { 28203 break 28204 } 28205 i1 := x1.AuxInt 28206 s := x1.Aux 28207 _ = x1.Args[2] 28208 idx := x1.Args[0] 28209 p := x1.Args[1] 28210 mem := x1.Args[2] 28211 or := v.Args[1] 28212 if or.Op != OpAMD64ORQ { 28213 break 28214 } 28215 _ = or.Args[1] 28216 y := or.Args[0] 28217 s0 := or.Args[1] 28218 if s0.Op != OpAMD64SHLQconst { 28219 break 28220 } 28221 j0 := s0.AuxInt 28222 x0 := s0.Args[0] 28223 if x0.Op != OpAMD64MOVWloadidx1 { 28224 break 28225 } 28226 i0 := x0.AuxInt 28227 if x0.Aux != s { 28228 break 28229 } 28230 _ = x0.Args[2] 28231 if idx != x0.Args[0] { 28232 break 28233 } 28234 if p != x0.Args[1] { 28235 break 28236 } 28237 if mem != x0.Args[2] { 28238 break 28239 } 28240 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28241 break 28242 } 28243 b = mergePoint(b, x0, x1) 28244 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28245 v.reset(OpCopy) 28246 v.AddArg(v0) 28247 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28248 v1.AuxInt = j0 28249 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28250 v2.AuxInt = i0 28251 v2.Aux = s 28252 v2.AddArg(p) 28253 v2.AddArg(idx) 28254 v2.AddArg(mem) 28255 v1.AddArg(v2) 28256 v0.AddArg(v1) 28257 v0.AddArg(y) 28258 return true 28259 } 28260 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28261 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28262 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28263 for { 28264 _ = v.Args[1] 28265 or := v.Args[0] 28266 if or.Op != OpAMD64ORQ { 28267 break 28268 } 28269 _ = or.Args[1] 28270 s0 := or.Args[0] 28271 if s0.Op != OpAMD64SHLQconst { 28272 break 28273 } 28274 j0 := s0.AuxInt 28275 x0 := s0.Args[0] 28276 if x0.Op != OpAMD64MOVWloadidx1 { 28277 break 28278 } 28279 i0 := x0.AuxInt 28280 s := x0.Aux 28281 _ = x0.Args[2] 28282 p := x0.Args[0] 28283 idx := x0.Args[1] 28284 mem := x0.Args[2] 28285 y := or.Args[1] 28286 s1 := v.Args[1] 28287 if s1.Op != OpAMD64SHLQconst { 28288 break 
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
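// Editorial note (not part of the generated code): the ORQ rules above fold
// two 16-bit indexed loads from adjacent offsets (i0 and i0+2), shifted into
// adjacent bit positions (j0 and j0+16 with j0 % 32 == 0) and OR'ed together,
// into a single 32-bit load. The near-identical repetitions exist because the
// operands of ORQ, of the inner ORQ, and the base/index arguments of
// MOVWloadidx1 each commute, so every argument order needs its own rule.
// A hedged sketch of the source-level pattern being recognized (pseudo-Go
// inside this comment; the names le16, b, i, j are illustrative only):
//
//	w0 := uint64(le16(b[i:]))   // MOVWloadidx1 [i0]
//	w1 := uint64(le16(b[i+2:])) // MOVWloadidx1 [i0+2]
//	x |= w0<<j | w1<<(j+16)     // rewritten to one MOVLloadidx1 + SHLQconst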
func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
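	// Editorial note (not part of the generated code): the rules that follow
	// handle the byte-swapped (big-endian) variants. (ROLWconst [8] x) of a
	// 16-bit load is a byte swap, so two adjacent swapped 16-bit loads OR'ed
	// into adjacent halves merge into (BSWAPL (MOVLload)), and two adjacent
	// BSWAPL'ed 32-bit loads merge into (BSWAPQ (MOVQload)): a hand-written
	// big-endian decode compiles down to one wide load plus one byte swap.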
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
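// Editorial note (not part of the generated code): the generator splits each
// op's rules into numbered chunk functions (here _90, _100, _110); the
// dispatcher tries the chunks in order and stops at the first rule that
// fires, so ORQ_100 below simply continues the same load-combining family.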
func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
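	// Editorial note (not part of the generated code): the remaining rules
	// repeat the byte-load combining for the indexed addressing forms
	// (MOVBloadidx1, MOVWloadidx1). Base and index commute in those ops,
	// which is why each logical rule appears once per (p idx)/(idx p)
	// permutation below.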
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
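// Editorial note (not part of the generated code): ORQ_110 continues the
// indexed byte-swap cases. Every rule follows the same shape: structurally
// match the value tree, evaluate the cond as a single boolean guard, then
// build the replacement at mergePoint(b, x0, x1), the block where both
// loads are known to be available.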
func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
break 30590 } 30591 r0 := sh.Args[0] 30592 if r0.Op != OpAMD64ROLWconst { 30593 break 30594 } 30595 if r0.AuxInt != 8 { 30596 break 30597 } 30598 x0 := r0.Args[0] 30599 if x0.Op != OpAMD64MOVWloadidx1 { 30600 break 30601 } 30602 i0 := x0.AuxInt 30603 s := x0.Aux 30604 _ = x0.Args[2] 30605 idx := x0.Args[0] 30606 p := x0.Args[1] 30607 mem := x0.Args[2] 30608 r1 := v.Args[1] 30609 if r1.Op != OpAMD64ROLWconst { 30610 break 30611 } 30612 if r1.AuxInt != 8 { 30613 break 30614 } 30615 x1 := r1.Args[0] 30616 if x1.Op != OpAMD64MOVWloadidx1 { 30617 break 30618 } 30619 i1 := x1.AuxInt 30620 if x1.Aux != s { 30621 break 30622 } 30623 _ = x1.Args[2] 30624 if p != x1.Args[0] { 30625 break 30626 } 30627 if idx != x1.Args[1] { 30628 break 30629 } 30630 if mem != x1.Args[2] { 30631 break 30632 } 30633 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30634 break 30635 } 30636 b = mergePoint(b, x0, x1) 30637 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30638 v.reset(OpCopy) 30639 v.AddArg(v0) 30640 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30641 v1.AuxInt = i0 30642 v1.Aux = s 30643 v1.AddArg(p) 30644 v1.AddArg(idx) 30645 v1.AddArg(mem) 30646 v0.AddArg(v1) 30647 return true 30648 } 30649 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 30650 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30651 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30652 for { 30653 _ = v.Args[1] 30654 sh := v.Args[0] 30655 if sh.Op != OpAMD64SHLQconst { 30656 break 30657 } 30658 if sh.AuxInt != 16 { 30659 break 30660 } 30661 r0 := sh.Args[0] 30662 if r0.Op != OpAMD64ROLWconst { 30663 break 30664 } 30665 if r0.AuxInt != 8 { 30666 break 30667 } 30668 x0 := r0.Args[0] 30669 if x0.Op != OpAMD64MOVWloadidx1 { 30670 break 30671 } 30672 i0 := x0.AuxInt 30673 s := x0.Aux 30674 _ = x0.Args[2] 30675 p := x0.Args[0] 30676 idx := x0.Args[1] 30677 mem := x0.Args[2] 30678 r1 := v.Args[1] 30679 if r1.Op != OpAMD64ROLWconst { 30680 break 30681 } 30682 if r1.AuxInt != 8 { 30683 break 30684 } 30685 x1 := r1.Args[0] 30686 if x1.Op != OpAMD64MOVWloadidx1 { 30687 break 30688 } 30689 i1 := x1.AuxInt 30690 if x1.Aux != s { 30691 break 30692 } 30693 _ = x1.Args[2] 30694 if idx != x1.Args[0] { 30695 break 30696 } 30697 if p != x1.Args[1] { 30698 break 30699 } 30700 if mem != x1.Args[2] { 30701 break 30702 } 30703 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30704 break 30705 } 30706 b = mergePoint(b, x0, x1) 30707 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30708 v.reset(OpCopy) 30709 v.AddArg(v0) 30710 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30711 v1.AuxInt = i0 30712 v1.Aux = s 30713 v1.AddArg(p) 30714 v1.AddArg(idx) 30715 v1.AddArg(mem) 30716 v0.AddArg(v1) 30717 return true 30718 } 30719 return false 30720 } 30721 func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool { 30722 b := v.Block 30723 _ = b 30724 typ := &b.Func.Config.Types 30725 _ = typ 30726 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] 
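// The ORQ rules that follow continue the big-endian load-combining patterns
// above: an OR of two byte-swapped loads from adjacent offsets is rewritten
// into a single wider indexed load followed by one byte swap (BSWAPL when
// merging 16-bit halves, BSWAPQ when merging 32-bit halves). As a hedged
// illustration only (this snippet is an assumption about typical source
// code, not part of the generated rules), a manual big-endian read such as
//
//	hi := uint32(b[i])<<8 | uint32(b[i+1])   // ROLWconst [8] (MOVWloadidx1 [i0])
//	lo := uint32(b[i+2])<<8 | uint32(b[i+3]) // ROLWconst [8] (MOVWloadidx1 [i0+2])
//	v := hi<<16 | lo                         // ORQ ... (SHLQconst [16] ...)
//
// lowers to the OR tree matched here and collapses to
// BSWAPL (MOVLloadidx1 [i0]), i.e. one 32-bit load plus one byte swap.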
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
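// rewriteValueAMD64_OpAMD64ORQ_130 holds the next batch of ORQ rules; the
// generator appears to split the rules into helpers of ten apiece so that no
// single function grows too large. The rules below perform partial merges:
// instead of consuming a whole OR tree, they peel two adjacent shifted byte
// loads out of a longer chain and leave the rest of the chain (y) intact.
// Schematically, restating the match/result comments:
//
//	(ORQ (SHLQconst [j0] (MOVBloadidx1 [i0] ...))
//	     (ORQ (SHLQconst [j0-8] (MOVBloadidx1 [i0+1] ...)) y))
//	=> (ORQ (SHLQconst [j0-8] (ROLWconst [8] (MOVWloadidx1 [i0] ...))) y)
//
// Repeated application pairs bytes into byte-swapped words while walking down
// the chain.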
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
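// rewriteValueAMD64_OpAMD64ORQ_140 continues the same peeling, and later in
// the group moves up to 16-bit halves: two adjacent byte-swapped word loads
// inside a longer OR chain become one BSWAPL-ed 32-bit load, again preserving
// the tail y. Each logical rewrite appears many times over because the
// generator seems to emit every operand ordering explicitly (the commutative
// ORQ operands, the position of y inside the inner ORQ, and the (p, idx)
// argument order of each indexed load) rather than canonicalizing first,
// which is what makes these functions so bulky.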
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
return true 32728 } 32729 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 32730 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32731 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32732 for { 32733 _ = v.Args[1] 32734 s0 := v.Args[0] 32735 if s0.Op != OpAMD64SHLQconst { 32736 break 32737 } 32738 j0 := s0.AuxInt 32739 r0 := s0.Args[0] 32740 if r0.Op != OpAMD64ROLWconst { 32741 break 32742 } 32743 if r0.AuxInt != 8 { 32744 break 32745 } 32746 x0 := r0.Args[0] 32747 if x0.Op != OpAMD64MOVWloadidx1 { 32748 break 32749 } 32750 i0 := x0.AuxInt 32751 s := x0.Aux 32752 _ = x0.Args[2] 32753 idx := x0.Args[0] 32754 p := x0.Args[1] 32755 mem := x0.Args[2] 32756 or := v.Args[1] 32757 if or.Op != OpAMD64ORQ { 32758 break 32759 } 32760 _ = or.Args[1] 32761 s1 := or.Args[0] 32762 if s1.Op != OpAMD64SHLQconst { 32763 break 32764 } 32765 j1 := s1.AuxInt 32766 r1 := s1.Args[0] 32767 if r1.Op != OpAMD64ROLWconst { 32768 break 32769 } 32770 if r1.AuxInt != 8 { 32771 break 32772 } 32773 x1 := r1.Args[0] 32774 if x1.Op != OpAMD64MOVWloadidx1 { 32775 break 32776 } 32777 i1 := x1.AuxInt 32778 if x1.Aux != s { 32779 break 32780 } 32781 _ = x1.Args[2] 32782 if idx != x1.Args[0] { 32783 break 32784 } 32785 if p != x1.Args[1] { 32786 break 32787 } 32788 if mem != x1.Args[2] { 32789 break 32790 } 32791 y := or.Args[1] 32792 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32793 break 32794 } 32795 b = mergePoint(b, x0, x1) 32796 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32797 v.reset(OpCopy) 32798 v.AddArg(v0) 32799 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32800 v1.AuxInt = j1 32801 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32802 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32803 v3.AuxInt = i0 32804 v3.Aux = s 32805 v3.AddArg(p) 32806 v3.AddArg(idx) 32807 v3.AddArg(mem) 32808 v2.AddArg(v3) 32809 v1.AddArg(v2) 32810 v0.AddArg(v1) 32811 v0.AddArg(y) 32812 return true 32813 } 32814 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 32815 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32816 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32817 for { 32818 _ = v.Args[1] 32819 s0 := v.Args[0] 32820 if s0.Op != OpAMD64SHLQconst { 32821 break 32822 } 32823 j0 := s0.AuxInt 32824 r0 := s0.Args[0] 32825 if r0.Op != OpAMD64ROLWconst { 32826 break 32827 } 32828 if r0.AuxInt != 8 { 32829 break 32830 } 32831 x0 := r0.Args[0] 32832 if x0.Op != 
OpAMD64MOVWloadidx1 { 32833 break 32834 } 32835 i0 := x0.AuxInt 32836 s := x0.Aux 32837 _ = x0.Args[2] 32838 p := x0.Args[0] 32839 idx := x0.Args[1] 32840 mem := x0.Args[2] 32841 or := v.Args[1] 32842 if or.Op != OpAMD64ORQ { 32843 break 32844 } 32845 _ = or.Args[1] 32846 y := or.Args[0] 32847 s1 := or.Args[1] 32848 if s1.Op != OpAMD64SHLQconst { 32849 break 32850 } 32851 j1 := s1.AuxInt 32852 r1 := s1.Args[0] 32853 if r1.Op != OpAMD64ROLWconst { 32854 break 32855 } 32856 if r1.AuxInt != 8 { 32857 break 32858 } 32859 x1 := r1.Args[0] 32860 if x1.Op != OpAMD64MOVWloadidx1 { 32861 break 32862 } 32863 i1 := x1.AuxInt 32864 if x1.Aux != s { 32865 break 32866 } 32867 _ = x1.Args[2] 32868 if p != x1.Args[0] { 32869 break 32870 } 32871 if idx != x1.Args[1] { 32872 break 32873 } 32874 if mem != x1.Args[2] { 32875 break 32876 } 32877 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32878 break 32879 } 32880 b = mergePoint(b, x0, x1) 32881 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32882 v.reset(OpCopy) 32883 v.AddArg(v0) 32884 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32885 v1.AuxInt = j1 32886 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32887 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32888 v3.AuxInt = i0 32889 v3.Aux = s 32890 v3.AddArg(p) 32891 v3.AddArg(idx) 32892 v3.AddArg(mem) 32893 v2.AddArg(v3) 32894 v1.AddArg(v2) 32895 v0.AddArg(v1) 32896 v0.AddArg(y) 32897 return true 32898 } 32899 return false 32900 } 32901 func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 32902 b := v.Block 32903 _ = b 32904 typ := &b.Func.Config.Types 32905 _ = typ 32906 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 32907 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32908 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32909 for { 32910 _ = v.Args[1] 32911 s0 := v.Args[0] 32912 if s0.Op != OpAMD64SHLQconst { 32913 break 32914 } 32915 j0 := s0.AuxInt 32916 r0 := s0.Args[0] 32917 if r0.Op != OpAMD64ROLWconst { 32918 break 32919 } 32920 if r0.AuxInt != 8 { 32921 break 32922 } 32923 x0 := r0.Args[0] 32924 if x0.Op != OpAMD64MOVWloadidx1 { 32925 break 32926 } 32927 i0 := x0.AuxInt 32928 s := x0.Aux 32929 _ = x0.Args[2] 32930 idx := x0.Args[0] 32931 p := x0.Args[1] 32932 mem := x0.Args[2] 32933 or := v.Args[1] 32934 if or.Op != OpAMD64ORQ { 32935 break 32936 } 32937 _ = or.Args[1] 32938 y := or.Args[0] 32939 s1 := or.Args[1] 32940 if s1.Op != OpAMD64SHLQconst { 32941 break 32942 } 32943 j1 := s1.AuxInt 32944 r1 := s1.Args[0] 32945 if r1.Op != OpAMD64ROLWconst { 32946 break 32947 } 32948 if r1.AuxInt != 8 { 32949 break 32950 } 32951 x1 := r1.Args[0] 32952 if x1.Op != OpAMD64MOVWloadidx1 { 32953 break 32954 } 32955 i1 := x1.AuxInt 32956 if x1.Aux != s { 32957 break 32958 } 32959 _ = x1.Args[2] 32960 if p != x1.Args[0] { 32961 break 32962 } 32963 if idx != x1.Args[1] { 32964 break 32965 
} 32966 if mem != x1.Args[2] { 32967 break 32968 } 32969 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32970 break 32971 } 32972 b = mergePoint(b, x0, x1) 32973 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32974 v.reset(OpCopy) 32975 v.AddArg(v0) 32976 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32977 v1.AuxInt = j1 32978 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32979 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32980 v3.AuxInt = i0 32981 v3.Aux = s 32982 v3.AddArg(p) 32983 v3.AddArg(idx) 32984 v3.AddArg(mem) 32985 v2.AddArg(v3) 32986 v1.AddArg(v2) 32987 v0.AddArg(v1) 32988 v0.AddArg(y) 32989 return true 32990 } 32991 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 32992 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32993 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32994 for { 32995 _ = v.Args[1] 32996 s0 := v.Args[0] 32997 if s0.Op != OpAMD64SHLQconst { 32998 break 32999 } 33000 j0 := s0.AuxInt 33001 r0 := s0.Args[0] 33002 if r0.Op != OpAMD64ROLWconst { 33003 break 33004 } 33005 if r0.AuxInt != 8 { 33006 break 33007 } 33008 x0 := r0.Args[0] 33009 if x0.Op != OpAMD64MOVWloadidx1 { 33010 break 33011 } 33012 i0 := x0.AuxInt 33013 s := x0.Aux 33014 _ = x0.Args[2] 33015 p := x0.Args[0] 33016 idx := x0.Args[1] 33017 mem := x0.Args[2] 33018 or := v.Args[1] 33019 if or.Op != OpAMD64ORQ { 33020 break 33021 } 33022 _ = or.Args[1] 33023 y := or.Args[0] 33024 s1 := or.Args[1] 33025 if s1.Op != OpAMD64SHLQconst { 33026 break 33027 } 33028 j1 := s1.AuxInt 33029 r1 := s1.Args[0] 33030 if r1.Op != OpAMD64ROLWconst { 33031 break 33032 } 33033 if r1.AuxInt != 8 { 33034 break 33035 } 33036 x1 := r1.Args[0] 33037 if x1.Op != OpAMD64MOVWloadidx1 { 33038 break 33039 } 33040 i1 := x1.AuxInt 33041 if x1.Aux != s { 33042 break 33043 } 33044 _ = x1.Args[2] 33045 if idx != x1.Args[0] { 33046 break 33047 } 33048 if p != x1.Args[1] { 33049 break 33050 } 33051 if mem != x1.Args[2] { 33052 break 33053 } 33054 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33055 break 33056 } 33057 b = mergePoint(b, x0, x1) 33058 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33059 v.reset(OpCopy) 33060 v.AddArg(v0) 33061 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33062 v1.AuxInt = j1 33063 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33064 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33065 v3.AuxInt = i0 33066 v3.Aux = s 33067 v3.AddArg(p) 33068 v3.AddArg(idx) 33069 v3.AddArg(mem) 33070 v2.AddArg(v3) 33071 v1.AddArg(v2) 33072 v0.AddArg(v1) 33073 v0.AddArg(y) 33074 return true 33075 } 33076 // match: (ORQ s0:(SHLQconst [j0] 
r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 33077 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33078 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33079 for { 33080 _ = v.Args[1] 33081 s0 := v.Args[0] 33082 if s0.Op != OpAMD64SHLQconst { 33083 break 33084 } 33085 j0 := s0.AuxInt 33086 r0 := s0.Args[0] 33087 if r0.Op != OpAMD64ROLWconst { 33088 break 33089 } 33090 if r0.AuxInt != 8 { 33091 break 33092 } 33093 x0 := r0.Args[0] 33094 if x0.Op != OpAMD64MOVWloadidx1 { 33095 break 33096 } 33097 i0 := x0.AuxInt 33098 s := x0.Aux 33099 _ = x0.Args[2] 33100 idx := x0.Args[0] 33101 p := x0.Args[1] 33102 mem := x0.Args[2] 33103 or := v.Args[1] 33104 if or.Op != OpAMD64ORQ { 33105 break 33106 } 33107 _ = or.Args[1] 33108 y := or.Args[0] 33109 s1 := or.Args[1] 33110 if s1.Op != OpAMD64SHLQconst { 33111 break 33112 } 33113 j1 := s1.AuxInt 33114 r1 := s1.Args[0] 33115 if r1.Op != OpAMD64ROLWconst { 33116 break 33117 } 33118 if r1.AuxInt != 8 { 33119 break 33120 } 33121 x1 := r1.Args[0] 33122 if x1.Op != OpAMD64MOVWloadidx1 { 33123 break 33124 } 33125 i1 := x1.AuxInt 33126 if x1.Aux != s { 33127 break 33128 } 33129 _ = x1.Args[2] 33130 if idx != x1.Args[0] { 33131 break 33132 } 33133 if p != x1.Args[1] { 33134 break 33135 } 33136 if mem != x1.Args[2] { 33137 break 33138 } 33139 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33140 break 33141 } 33142 b = mergePoint(b, x0, x1) 33143 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33144 v.reset(OpCopy) 33145 v.AddArg(v0) 33146 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33147 v1.AuxInt = j1 33148 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33149 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33150 v3.AuxInt = i0 33151 v3.Aux = s 33152 v3.AddArg(p) 33153 v3.AddArg(idx) 33154 v3.AddArg(mem) 33155 v2.AddArg(v3) 33156 v1.AddArg(v2) 33157 v0.AddArg(v1) 33158 v0.AddArg(y) 33159 return true 33160 } 33161 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33162 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33163 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33164 for { 33165 _ = v.Args[1] 33166 or := v.Args[0] 33167 if or.Op != OpAMD64ORQ { 33168 break 33169 } 33170 _ = or.Args[1] 33171 s1 := or.Args[0] 33172 if s1.Op != OpAMD64SHLQconst { 33173 break 33174 } 33175 j1 := s1.AuxInt 33176 r1 := s1.Args[0] 33177 if r1.Op != OpAMD64ROLWconst { 33178 break 33179 } 33180 if r1.AuxInt != 8 { 33181 break 33182 } 33183 
x1 := r1.Args[0] 33184 if x1.Op != OpAMD64MOVWloadidx1 { 33185 break 33186 } 33187 i1 := x1.AuxInt 33188 s := x1.Aux 33189 _ = x1.Args[2] 33190 p := x1.Args[0] 33191 idx := x1.Args[1] 33192 mem := x1.Args[2] 33193 y := or.Args[1] 33194 s0 := v.Args[1] 33195 if s0.Op != OpAMD64SHLQconst { 33196 break 33197 } 33198 j0 := s0.AuxInt 33199 r0 := s0.Args[0] 33200 if r0.Op != OpAMD64ROLWconst { 33201 break 33202 } 33203 if r0.AuxInt != 8 { 33204 break 33205 } 33206 x0 := r0.Args[0] 33207 if x0.Op != OpAMD64MOVWloadidx1 { 33208 break 33209 } 33210 i0 := x0.AuxInt 33211 if x0.Aux != s { 33212 break 33213 } 33214 _ = x0.Args[2] 33215 if p != x0.Args[0] { 33216 break 33217 } 33218 if idx != x0.Args[1] { 33219 break 33220 } 33221 if mem != x0.Args[2] { 33222 break 33223 } 33224 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33225 break 33226 } 33227 b = mergePoint(b, x0, x1) 33228 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33229 v.reset(OpCopy) 33230 v.AddArg(v0) 33231 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33232 v1.AuxInt = j1 33233 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33234 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33235 v3.AuxInt = i0 33236 v3.Aux = s 33237 v3.AddArg(p) 33238 v3.AddArg(idx) 33239 v3.AddArg(mem) 33240 v2.AddArg(v3) 33241 v1.AddArg(v2) 33242 v0.AddArg(v1) 33243 v0.AddArg(y) 33244 return true 33245 } 33246 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33247 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33248 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33249 for { 33250 _ = v.Args[1] 33251 or := v.Args[0] 33252 if or.Op != OpAMD64ORQ { 33253 break 33254 } 33255 _ = or.Args[1] 33256 s1 := or.Args[0] 33257 if s1.Op != OpAMD64SHLQconst { 33258 break 33259 } 33260 j1 := s1.AuxInt 33261 r1 := s1.Args[0] 33262 if r1.Op != OpAMD64ROLWconst { 33263 break 33264 } 33265 if r1.AuxInt != 8 { 33266 break 33267 } 33268 x1 := r1.Args[0] 33269 if x1.Op != OpAMD64MOVWloadidx1 { 33270 break 33271 } 33272 i1 := x1.AuxInt 33273 s := x1.Aux 33274 _ = x1.Args[2] 33275 idx := x1.Args[0] 33276 p := x1.Args[1] 33277 mem := x1.Args[2] 33278 y := or.Args[1] 33279 s0 := v.Args[1] 33280 if s0.Op != OpAMD64SHLQconst { 33281 break 33282 } 33283 j0 := s0.AuxInt 33284 r0 := s0.Args[0] 33285 if r0.Op != OpAMD64ROLWconst { 33286 break 33287 } 33288 if r0.AuxInt != 8 { 33289 break 33290 } 33291 x0 := r0.Args[0] 33292 if x0.Op != OpAMD64MOVWloadidx1 { 33293 break 33294 } 33295 i0 := x0.AuxInt 33296 if x0.Aux != s { 33297 break 33298 } 33299 _ = x0.Args[2] 33300 if p != x0.Args[0] { 33301 break 33302 } 33303 if idx != x0.Args[1] { 33304 break 33305 } 33306 if mem != x0.Args[2] { 33307 break 33308 } 33309 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33310 break 33311 } 33312 b = mergePoint(b, x0, x1) 33313 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33314 v.reset(OpCopy) 33315 v.AddArg(v0) 33316 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33317 v1.AuxInt = j1 33318 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33319 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33320 v3.AuxInt = i0 33321 v3.Aux = s 33322 v3.AddArg(p) 33323 v3.AddArg(idx) 33324 v3.AddArg(mem) 33325 v2.AddArg(v3) 33326 v1.AddArg(v2) 33327 v0.AddArg(v1) 33328 v0.AddArg(y) 33329 return true 33330 } 33331 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33332 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33333 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33334 for { 33335 _ = v.Args[1] 33336 or := v.Args[0] 33337 if or.Op != OpAMD64ORQ { 33338 break 33339 } 33340 _ = or.Args[1] 33341 y := or.Args[0] 33342 s1 := or.Args[1] 33343 if s1.Op != OpAMD64SHLQconst { 33344 break 33345 } 33346 j1 := s1.AuxInt 33347 r1 := s1.Args[0] 33348 if r1.Op != OpAMD64ROLWconst { 33349 break 33350 } 33351 if r1.AuxInt != 8 { 33352 break 33353 } 33354 x1 := r1.Args[0] 33355 if x1.Op != OpAMD64MOVWloadidx1 { 33356 break 33357 } 33358 i1 := x1.AuxInt 33359 s := x1.Aux 33360 _ = x1.Args[2] 33361 p := x1.Args[0] 33362 idx := x1.Args[1] 33363 mem := x1.Args[2] 33364 s0 := v.Args[1] 33365 if s0.Op != OpAMD64SHLQconst { 33366 break 33367 } 33368 j0 := s0.AuxInt 33369 r0 := s0.Args[0] 33370 if r0.Op != OpAMD64ROLWconst { 33371 break 33372 } 33373 if r0.AuxInt != 8 { 33374 break 33375 } 33376 x0 := r0.Args[0] 33377 if x0.Op != OpAMD64MOVWloadidx1 { 33378 break 33379 } 33380 i0 := x0.AuxInt 33381 if x0.Aux != s { 33382 break 33383 } 33384 _ = x0.Args[2] 33385 if p != x0.Args[0] { 33386 break 33387 } 33388 if idx != x0.Args[1] { 33389 break 33390 } 33391 if mem != x0.Args[2] { 33392 break 33393 } 33394 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33395 break 33396 } 33397 b = mergePoint(b, x0, x1) 33398 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33399 v.reset(OpCopy) 33400 v.AddArg(v0) 33401 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33402 v1.AuxInt = j1 33403 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33404 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33405 v3.AuxInt = i0 33406 v3.Aux = s 33407 v3.AddArg(p) 33408 v3.AddArg(idx) 33409 v3.AddArg(mem) 33410 v2.AddArg(v3) 33411 v1.AddArg(v2) 33412 v0.AddArg(v1) 33413 v0.AddArg(y) 33414 return true 33415 } 33416 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33417 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33418 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33419 for { 33420 _ = v.Args[1] 33421 or := v.Args[0] 33422 if or.Op != OpAMD64ORQ { 33423 break 33424 } 33425 _ = or.Args[1] 33426 y := or.Args[0] 33427 s1 := or.Args[1] 33428 if s1.Op != OpAMD64SHLQconst { 33429 break 33430 } 33431 j1 := s1.AuxInt 33432 r1 := s1.Args[0] 33433 if r1.Op != OpAMD64ROLWconst { 33434 break 33435 } 33436 if r1.AuxInt != 8 { 33437 break 33438 } 33439 x1 := r1.Args[0] 33440 if x1.Op != OpAMD64MOVWloadidx1 { 33441 break 33442 } 33443 i1 := x1.AuxInt 33444 s := x1.Aux 33445 _ = x1.Args[2] 33446 idx := x1.Args[0] 33447 p := x1.Args[1] 33448 mem := x1.Args[2] 33449 s0 := v.Args[1] 33450 if s0.Op != OpAMD64SHLQconst { 33451 break 33452 } 33453 j0 := s0.AuxInt 33454 r0 := s0.Args[0] 33455 if r0.Op != OpAMD64ROLWconst { 33456 break 33457 } 33458 if r0.AuxInt != 8 { 33459 break 33460 } 33461 x0 := r0.Args[0] 33462 if x0.Op != OpAMD64MOVWloadidx1 { 33463 break 33464 } 33465 i0 := x0.AuxInt 33466 if x0.Aux != s { 33467 break 33468 } 33469 _ = x0.Args[2] 33470 if p != x0.Args[0] { 33471 break 33472 } 33473 if idx != x0.Args[1] { 33474 break 33475 } 33476 if mem != x0.Args[2] { 33477 break 33478 } 33479 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33480 break 33481 } 33482 b = mergePoint(b, x0, x1) 33483 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33484 v.reset(OpCopy) 33485 v.AddArg(v0) 33486 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33487 v1.AuxInt = j1 33488 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33489 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33490 v3.AuxInt = i0 33491 v3.Aux = s 33492 v3.AddArg(p) 33493 v3.AddArg(idx) 33494 v3.AddArg(mem) 33495 v2.AddArg(v3) 33496 v1.AddArg(v2) 33497 v0.AddArg(v1) 33498 v0.AddArg(y) 33499 return true 33500 } 33501 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33502 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33503 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33504 for { 33505 _ = v.Args[1] 33506 or := v.Args[0] 33507 if or.Op != OpAMD64ORQ { 33508 break 33509 } 33510 _ = or.Args[1] 33511 s1 := or.Args[0] 33512 if s1.Op != OpAMD64SHLQconst { 33513 break 33514 } 33515 j1 := s1.AuxInt 33516 r1 := s1.Args[0] 33517 if r1.Op != OpAMD64ROLWconst { 33518 break 33519 } 33520 if r1.AuxInt != 8 { 33521 break 33522 } 33523 x1 := r1.Args[0] 33524 if x1.Op != OpAMD64MOVWloadidx1 { 33525 break 33526 } 33527 i1 := x1.AuxInt 33528 s := x1.Aux 33529 _ = x1.Args[2] 33530 p := x1.Args[0] 33531 idx := x1.Args[1] 33532 mem := x1.Args[2] 33533 y := 
or.Args[1] 33534 s0 := v.Args[1] 33535 if s0.Op != OpAMD64SHLQconst { 33536 break 33537 } 33538 j0 := s0.AuxInt 33539 r0 := s0.Args[0] 33540 if r0.Op != OpAMD64ROLWconst { 33541 break 33542 } 33543 if r0.AuxInt != 8 { 33544 break 33545 } 33546 x0 := r0.Args[0] 33547 if x0.Op != OpAMD64MOVWloadidx1 { 33548 break 33549 } 33550 i0 := x0.AuxInt 33551 if x0.Aux != s { 33552 break 33553 } 33554 _ = x0.Args[2] 33555 if idx != x0.Args[0] { 33556 break 33557 } 33558 if p != x0.Args[1] { 33559 break 33560 } 33561 if mem != x0.Args[2] { 33562 break 33563 } 33564 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33565 break 33566 } 33567 b = mergePoint(b, x0, x1) 33568 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33569 v.reset(OpCopy) 33570 v.AddArg(v0) 33571 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33572 v1.AuxInt = j1 33573 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33574 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33575 v3.AuxInt = i0 33576 v3.Aux = s 33577 v3.AddArg(p) 33578 v3.AddArg(idx) 33579 v3.AddArg(mem) 33580 v2.AddArg(v3) 33581 v1.AddArg(v2) 33582 v0.AddArg(v1) 33583 v0.AddArg(y) 33584 return true 33585 } 33586 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33587 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33588 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33589 for { 33590 _ = v.Args[1] 33591 or := v.Args[0] 33592 if or.Op != OpAMD64ORQ { 33593 break 33594 } 33595 _ = or.Args[1] 33596 s1 := or.Args[0] 33597 if s1.Op != OpAMD64SHLQconst { 33598 break 33599 } 33600 j1 := s1.AuxInt 33601 r1 := s1.Args[0] 33602 if r1.Op != OpAMD64ROLWconst { 33603 break 33604 } 33605 if r1.AuxInt != 8 { 33606 break 33607 } 33608 x1 := r1.Args[0] 33609 if x1.Op != OpAMD64MOVWloadidx1 { 33610 break 33611 } 33612 i1 := x1.AuxInt 33613 s := x1.Aux 33614 _ = x1.Args[2] 33615 idx := x1.Args[0] 33616 p := x1.Args[1] 33617 mem := x1.Args[2] 33618 y := or.Args[1] 33619 s0 := v.Args[1] 33620 if s0.Op != OpAMD64SHLQconst { 33621 break 33622 } 33623 j0 := s0.AuxInt 33624 r0 := s0.Args[0] 33625 if r0.Op != OpAMD64ROLWconst { 33626 break 33627 } 33628 if r0.AuxInt != 8 { 33629 break 33630 } 33631 x0 := r0.Args[0] 33632 if x0.Op != OpAMD64MOVWloadidx1 { 33633 break 33634 } 33635 i0 := x0.AuxInt 33636 if x0.Aux != s { 33637 break 33638 } 33639 _ = x0.Args[2] 33640 if idx != x0.Args[0] { 33641 break 33642 } 33643 if p != x0.Args[1] { 33644 break 33645 } 33646 if mem != x0.Args[2] { 33647 break 33648 } 33649 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33650 break 33651 } 33652 b = mergePoint(b, x0, x1) 33653 v0 := b.NewValue0(v.Pos, 
OpAMD64ORQ, v.Type) 33654 v.reset(OpCopy) 33655 v.AddArg(v0) 33656 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33657 v1.AuxInt = j1 33658 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33659 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33660 v3.AuxInt = i0 33661 v3.Aux = s 33662 v3.AddArg(p) 33663 v3.AddArg(idx) 33664 v3.AddArg(mem) 33665 v2.AddArg(v3) 33666 v1.AddArg(v2) 33667 v0.AddArg(v1) 33668 v0.AddArg(y) 33669 return true 33670 } 33671 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33672 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33673 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33674 for { 33675 _ = v.Args[1] 33676 or := v.Args[0] 33677 if or.Op != OpAMD64ORQ { 33678 break 33679 } 33680 _ = or.Args[1] 33681 y := or.Args[0] 33682 s1 := or.Args[1] 33683 if s1.Op != OpAMD64SHLQconst { 33684 break 33685 } 33686 j1 := s1.AuxInt 33687 r1 := s1.Args[0] 33688 if r1.Op != OpAMD64ROLWconst { 33689 break 33690 } 33691 if r1.AuxInt != 8 { 33692 break 33693 } 33694 x1 := r1.Args[0] 33695 if x1.Op != OpAMD64MOVWloadidx1 { 33696 break 33697 } 33698 i1 := x1.AuxInt 33699 s := x1.Aux 33700 _ = x1.Args[2] 33701 p := x1.Args[0] 33702 idx := x1.Args[1] 33703 mem := x1.Args[2] 33704 s0 := v.Args[1] 33705 if s0.Op != OpAMD64SHLQconst { 33706 break 33707 } 33708 j0 := s0.AuxInt 33709 r0 := s0.Args[0] 33710 if r0.Op != OpAMD64ROLWconst { 33711 break 33712 } 33713 if r0.AuxInt != 8 { 33714 break 33715 } 33716 x0 := r0.Args[0] 33717 if x0.Op != OpAMD64MOVWloadidx1 { 33718 break 33719 } 33720 i0 := x0.AuxInt 33721 if x0.Aux != s { 33722 break 33723 } 33724 _ = x0.Args[2] 33725 if idx != x0.Args[0] { 33726 break 33727 } 33728 if p != x0.Args[1] { 33729 break 33730 } 33731 if mem != x0.Args[2] { 33732 break 33733 } 33734 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33735 break 33736 } 33737 b = mergePoint(b, x0, x1) 33738 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33739 v.reset(OpCopy) 33740 v.AddArg(v0) 33741 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33742 v1.AuxInt = j1 33743 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33744 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33745 v3.AuxInt = i0 33746 v3.Aux = s 33747 v3.AddArg(p) 33748 v3.AddArg(idx) 33749 v3.AddArg(mem) 33750 v2.AddArg(v3) 33751 v1.AddArg(v2) 33752 v0.AddArg(v1) 33753 v0.AddArg(y) 33754 return true 33755 } 33756 return false 33757 } 33758 func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { 33759 b := v.Block 33760 _ = b 33761 typ := &b.Func.Config.Types 33762 _ = typ 33763 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33764 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && 
s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33765 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33766 for { 33767 _ = v.Args[1] 33768 or := v.Args[0] 33769 if or.Op != OpAMD64ORQ { 33770 break 33771 } 33772 _ = or.Args[1] 33773 y := or.Args[0] 33774 s1 := or.Args[1] 33775 if s1.Op != OpAMD64SHLQconst { 33776 break 33777 } 33778 j1 := s1.AuxInt 33779 r1 := s1.Args[0] 33780 if r1.Op != OpAMD64ROLWconst { 33781 break 33782 } 33783 if r1.AuxInt != 8 { 33784 break 33785 } 33786 x1 := r1.Args[0] 33787 if x1.Op != OpAMD64MOVWloadidx1 { 33788 break 33789 } 33790 i1 := x1.AuxInt 33791 s := x1.Aux 33792 _ = x1.Args[2] 33793 idx := x1.Args[0] 33794 p := x1.Args[1] 33795 mem := x1.Args[2] 33796 s0 := v.Args[1] 33797 if s0.Op != OpAMD64SHLQconst { 33798 break 33799 } 33800 j0 := s0.AuxInt 33801 r0 := s0.Args[0] 33802 if r0.Op != OpAMD64ROLWconst { 33803 break 33804 } 33805 if r0.AuxInt != 8 { 33806 break 33807 } 33808 x0 := r0.Args[0] 33809 if x0.Op != OpAMD64MOVWloadidx1 { 33810 break 33811 } 33812 i0 := x0.AuxInt 33813 if x0.Aux != s { 33814 break 33815 } 33816 _ = x0.Args[2] 33817 if idx != x0.Args[0] { 33818 break 33819 } 33820 if p != x0.Args[1] { 33821 break 33822 } 33823 if mem != x0.Args[2] { 33824 break 33825 } 33826 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33827 break 33828 } 33829 b = mergePoint(b, x0, x1) 33830 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33831 v.reset(OpCopy) 33832 v.AddArg(v0) 33833 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33834 v1.AuxInt = j1 33835 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33836 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33837 v3.AuxInt = i0 33838 v3.Aux = s 33839 v3.AddArg(p) 33840 v3.AddArg(idx) 33841 v3.AddArg(mem) 33842 v2.AddArg(v3) 33843 v1.AddArg(v2) 33844 v0.AddArg(v1) 33845 v0.AddArg(y) 33846 return true 33847 } 33848 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) 33849 // cond: canMergeLoad(v, l, x) && clobber(l) 33850 // result: (ORQmem x [off] {sym} ptr mem) 33851 for { 33852 _ = v.Args[1] 33853 x := v.Args[0] 33854 l := v.Args[1] 33855 if l.Op != OpAMD64MOVQload { 33856 break 33857 } 33858 off := l.AuxInt 33859 sym := l.Aux 33860 _ = l.Args[1] 33861 ptr := l.Args[0] 33862 mem := l.Args[1] 33863 if !(canMergeLoad(v, l, x) && clobber(l)) { 33864 break 33865 } 33866 v.reset(OpAMD64ORQmem) 33867 v.AuxInt = off 33868 v.Aux = sym 33869 v.AddArg(x) 33870 v.AddArg(ptr) 33871 v.AddArg(mem) 33872 return true 33873 } 33874 // match: (ORQ l:(MOVQload [off] {sym} ptr mem) x) 33875 // cond: canMergeLoad(v, l, x) && clobber(l) 33876 // result: (ORQmem x [off] {sym} ptr mem) 33877 for { 33878 _ = v.Args[1] 33879 l := v.Args[0] 33880 if l.Op != OpAMD64MOVQload { 33881 break 33882 } 33883 off := l.AuxInt 33884 sym := l.Aux 33885 _ = l.Args[1] 33886 ptr := l.Args[0] 33887 mem := l.Args[1] 33888 x := v.Args[1] 33889 if !(canMergeLoad(v, l, x) && clobber(l)) { 33890 break 33891 } 33892 v.reset(OpAMD64ORQmem) 33893 v.AuxInt = off 33894 v.Aux = sym 33895 v.AddArg(x) 33896 v.AddArg(ptr) 33897 v.AddArg(mem) 33898 return true 33899 } 33900 return false 33901 } 
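// Note: the ORQ cases above are the generated expansion of a few
// gen/AMD64.rules load-combining patterns over every commutative
// ordering of (p idx) and of the ORQ operands, which is why the same
// cond/result pair repeats with only the argument order varied.
// Together they recognize a big-endian value assembled byte-by-byte
// with shifts and ORs and replace it with one wider load plus a byte
// swap (ROLWconst [8] for two bytes, BSWAPL for four); the trailing
// pair of cases instead folds a MOVQload operand directly into ORQmem.
// A hypothetical sketch of the kind of Go source whose SSA these
// byte-merge rules (together with their non-indexed siblings) target:
//
//	func beUint32(b []byte) uint32 {
//		// Before rewriting: four byte loads, three shifts, three ORs.
//		// After rewriting: one 32-bit load feeding a single BSWAPL.
//		return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
//	}
//
// The x.Uses == 1 and clobber(x) conditions ensure every intermediate
// value dies with the merge, and mergePoint(b, x0, x1) supplies a block
// in which both original loads are available, so the merged load has a
// safe home.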
33902 func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { 33903 // match: (ORQconst [0] x) 33904 // cond: 33905 // result: x 33906 for { 33907 if v.AuxInt != 0 { 33908 break 33909 } 33910 x := v.Args[0] 33911 v.reset(OpCopy) 33912 v.Type = x.Type 33913 v.AddArg(x) 33914 return true 33915 } 33916 // match: (ORQconst [-1] _) 33917 // cond: 33918 // result: (MOVQconst [-1]) 33919 for { 33920 if v.AuxInt != -1 { 33921 break 33922 } 33923 v.reset(OpAMD64MOVQconst) 33924 v.AuxInt = -1 33925 return true 33926 } 33927 // match: (ORQconst [c] (MOVQconst [d])) 33928 // cond: 33929 // result: (MOVQconst [c|d]) 33930 for { 33931 c := v.AuxInt 33932 v_0 := v.Args[0] 33933 if v_0.Op != OpAMD64MOVQconst { 33934 break 33935 } 33936 d := v_0.AuxInt 33937 v.reset(OpAMD64MOVQconst) 33938 v.AuxInt = c | d 33939 return true 33940 } 33941 return false 33942 } 33943 func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool { 33944 b := v.Block 33945 _ = b 33946 typ := &b.Func.Config.Types 33947 _ = typ 33948 // match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 33949 // cond: 33950 // result: ( ORQ x (MOVQf2i y)) 33951 for { 33952 off := v.AuxInt 33953 sym := v.Aux 33954 _ = v.Args[2] 33955 x := v.Args[0] 33956 ptr := v.Args[1] 33957 v_2 := v.Args[2] 33958 if v_2.Op != OpAMD64MOVSDstore { 33959 break 33960 } 33961 if v_2.AuxInt != off { 33962 break 33963 } 33964 if v_2.Aux != sym { 33965 break 33966 } 33967 _ = v_2.Args[2] 33968 if ptr != v_2.Args[0] { 33969 break 33970 } 33971 y := v_2.Args[1] 33972 v.reset(OpAMD64ORQ) 33973 v.AddArg(x) 33974 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 33975 v0.AddArg(y) 33976 v.AddArg(v0) 33977 return true 33978 } 33979 return false 33980 } 33981 func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool { 33982 // match: (ROLB x (NEGQ y)) 33983 // cond: 33984 // result: (RORB x y) 33985 for { 33986 _ = v.Args[1] 33987 x := v.Args[0] 33988 v_1 := v.Args[1] 33989 if v_1.Op != OpAMD64NEGQ { 33990 break 33991 } 33992 y := v_1.Args[0] 33993 v.reset(OpAMD64RORB) 33994 v.AddArg(x) 33995 v.AddArg(y) 33996 return true 33997 } 33998 // match: (ROLB x (NEGL y)) 33999 // cond: 34000 // result: (RORB x y) 34001 for { 34002 _ = v.Args[1] 34003 x := v.Args[0] 34004 v_1 := v.Args[1] 34005 if v_1.Op != OpAMD64NEGL { 34006 break 34007 } 34008 y := v_1.Args[0] 34009 v.reset(OpAMD64RORB) 34010 v.AddArg(x) 34011 v.AddArg(y) 34012 return true 34013 } 34014 // match: (ROLB x (MOVQconst [c])) 34015 // cond: 34016 // result: (ROLBconst [c&7 ] x) 34017 for { 34018 _ = v.Args[1] 34019 x := v.Args[0] 34020 v_1 := v.Args[1] 34021 if v_1.Op != OpAMD64MOVQconst { 34022 break 34023 } 34024 c := v_1.AuxInt 34025 v.reset(OpAMD64ROLBconst) 34026 v.AuxInt = c & 7 34027 v.AddArg(x) 34028 return true 34029 } 34030 // match: (ROLB x (MOVLconst [c])) 34031 // cond: 34032 // result: (ROLBconst [c&7 ] x) 34033 for { 34034 _ = v.Args[1] 34035 x := v.Args[0] 34036 v_1 := v.Args[1] 34037 if v_1.Op != OpAMD64MOVLconst { 34038 break 34039 } 34040 c := v_1.AuxInt 34041 v.reset(OpAMD64ROLBconst) 34042 v.AuxInt = c & 7 34043 v.AddArg(x) 34044 return true 34045 } 34046 return false 34047 } 34048 func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool { 34049 // match: (ROLBconst [c] (ROLBconst [d] x)) 34050 // cond: 34051 // result: (ROLBconst [(c+d)& 7] x) 34052 for { 34053 c := v.AuxInt 34054 v_0 := v.Args[0] 34055 if v_0.Op != OpAMD64ROLBconst { 34056 break 34057 } 34058 d := v_0.AuxInt 34059 x := v_0.Args[0] 34060 v.reset(OpAMD64ROLBconst) 34061 v.AuxInt = (c + d) & 7 34062 v.AddArg(x) 34063 return 
true 34064 } 34065 // match: (ROLBconst x [0]) 34066 // cond: 34067 // result: x 34068 for { 34069 if v.AuxInt != 0 { 34070 break 34071 } 34072 x := v.Args[0] 34073 v.reset(OpCopy) 34074 v.Type = x.Type 34075 v.AddArg(x) 34076 return true 34077 } 34078 return false 34079 } 34080 func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool { 34081 // match: (ROLL x (NEGQ y)) 34082 // cond: 34083 // result: (RORL x y) 34084 for { 34085 _ = v.Args[1] 34086 x := v.Args[0] 34087 v_1 := v.Args[1] 34088 if v_1.Op != OpAMD64NEGQ { 34089 break 34090 } 34091 y := v_1.Args[0] 34092 v.reset(OpAMD64RORL) 34093 v.AddArg(x) 34094 v.AddArg(y) 34095 return true 34096 } 34097 // match: (ROLL x (NEGL y)) 34098 // cond: 34099 // result: (RORL x y) 34100 for { 34101 _ = v.Args[1] 34102 x := v.Args[0] 34103 v_1 := v.Args[1] 34104 if v_1.Op != OpAMD64NEGL { 34105 break 34106 } 34107 y := v_1.Args[0] 34108 v.reset(OpAMD64RORL) 34109 v.AddArg(x) 34110 v.AddArg(y) 34111 return true 34112 } 34113 // match: (ROLL x (MOVQconst [c])) 34114 // cond: 34115 // result: (ROLLconst [c&31] x) 34116 for { 34117 _ = v.Args[1] 34118 x := v.Args[0] 34119 v_1 := v.Args[1] 34120 if v_1.Op != OpAMD64MOVQconst { 34121 break 34122 } 34123 c := v_1.AuxInt 34124 v.reset(OpAMD64ROLLconst) 34125 v.AuxInt = c & 31 34126 v.AddArg(x) 34127 return true 34128 } 34129 // match: (ROLL x (MOVLconst [c])) 34130 // cond: 34131 // result: (ROLLconst [c&31] x) 34132 for { 34133 _ = v.Args[1] 34134 x := v.Args[0] 34135 v_1 := v.Args[1] 34136 if v_1.Op != OpAMD64MOVLconst { 34137 break 34138 } 34139 c := v_1.AuxInt 34140 v.reset(OpAMD64ROLLconst) 34141 v.AuxInt = c & 31 34142 v.AddArg(x) 34143 return true 34144 } 34145 return false 34146 } 34147 func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool { 34148 // match: (ROLLconst [c] (ROLLconst [d] x)) 34149 // cond: 34150 // result: (ROLLconst [(c+d)&31] x) 34151 for { 34152 c := v.AuxInt 34153 v_0 := v.Args[0] 34154 if v_0.Op != OpAMD64ROLLconst { 34155 break 34156 } 34157 d := v_0.AuxInt 34158 x := v_0.Args[0] 34159 v.reset(OpAMD64ROLLconst) 34160 v.AuxInt = (c + d) & 31 34161 v.AddArg(x) 34162 return true 34163 } 34164 // match: (ROLLconst x [0]) 34165 // cond: 34166 // result: x 34167 for { 34168 if v.AuxInt != 0 { 34169 break 34170 } 34171 x := v.Args[0] 34172 v.reset(OpCopy) 34173 v.Type = x.Type 34174 v.AddArg(x) 34175 return true 34176 } 34177 return false 34178 } 34179 func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool { 34180 // match: (ROLQ x (NEGQ y)) 34181 // cond: 34182 // result: (RORQ x y) 34183 for { 34184 _ = v.Args[1] 34185 x := v.Args[0] 34186 v_1 := v.Args[1] 34187 if v_1.Op != OpAMD64NEGQ { 34188 break 34189 } 34190 y := v_1.Args[0] 34191 v.reset(OpAMD64RORQ) 34192 v.AddArg(x) 34193 v.AddArg(y) 34194 return true 34195 } 34196 // match: (ROLQ x (NEGL y)) 34197 // cond: 34198 // result: (RORQ x y) 34199 for { 34200 _ = v.Args[1] 34201 x := v.Args[0] 34202 v_1 := v.Args[1] 34203 if v_1.Op != OpAMD64NEGL { 34204 break 34205 } 34206 y := v_1.Args[0] 34207 v.reset(OpAMD64RORQ) 34208 v.AddArg(x) 34209 v.AddArg(y) 34210 return true 34211 } 34212 // match: (ROLQ x (MOVQconst [c])) 34213 // cond: 34214 // result: (ROLQconst [c&63] x) 34215 for { 34216 _ = v.Args[1] 34217 x := v.Args[0] 34218 v_1 := v.Args[1] 34219 if v_1.Op != OpAMD64MOVQconst { 34220 break 34221 } 34222 c := v_1.AuxInt 34223 v.reset(OpAMD64ROLQconst) 34224 v.AuxInt = c & 63 34225 v.AddArg(x) 34226 return true 34227 } 34228 // match: (ROLQ x (MOVLconst [c])) 34229 // cond: 34230 // result: (ROLQconst [c&63] x) 34231 for { 34232 _ 
= v.Args[1] 34233 x := v.Args[0] 34234 v_1 := v.Args[1] 34235 if v_1.Op != OpAMD64MOVLconst { 34236 break 34237 } 34238 c := v_1.AuxInt 34239 v.reset(OpAMD64ROLQconst) 34240 v.AuxInt = c & 63 34241 v.AddArg(x) 34242 return true 34243 } 34244 return false 34245 } 34246 func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool { 34247 // match: (ROLQconst [c] (ROLQconst [d] x)) 34248 // cond: 34249 // result: (ROLQconst [(c+d)&63] x) 34250 for { 34251 c := v.AuxInt 34252 v_0 := v.Args[0] 34253 if v_0.Op != OpAMD64ROLQconst { 34254 break 34255 } 34256 d := v_0.AuxInt 34257 x := v_0.Args[0] 34258 v.reset(OpAMD64ROLQconst) 34259 v.AuxInt = (c + d) & 63 34260 v.AddArg(x) 34261 return true 34262 } 34263 // match: (ROLQconst x [0]) 34264 // cond: 34265 // result: x 34266 for { 34267 if v.AuxInt != 0 { 34268 break 34269 } 34270 x := v.Args[0] 34271 v.reset(OpCopy) 34272 v.Type = x.Type 34273 v.AddArg(x) 34274 return true 34275 } 34276 return false 34277 } 34278 func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool { 34279 // match: (ROLW x (NEGQ y)) 34280 // cond: 34281 // result: (RORW x y) 34282 for { 34283 _ = v.Args[1] 34284 x := v.Args[0] 34285 v_1 := v.Args[1] 34286 if v_1.Op != OpAMD64NEGQ { 34287 break 34288 } 34289 y := v_1.Args[0] 34290 v.reset(OpAMD64RORW) 34291 v.AddArg(x) 34292 v.AddArg(y) 34293 return true 34294 } 34295 // match: (ROLW x (NEGL y)) 34296 // cond: 34297 // result: (RORW x y) 34298 for { 34299 _ = v.Args[1] 34300 x := v.Args[0] 34301 v_1 := v.Args[1] 34302 if v_1.Op != OpAMD64NEGL { 34303 break 34304 } 34305 y := v_1.Args[0] 34306 v.reset(OpAMD64RORW) 34307 v.AddArg(x) 34308 v.AddArg(y) 34309 return true 34310 } 34311 // match: (ROLW x (MOVQconst [c])) 34312 // cond: 34313 // result: (ROLWconst [c&15] x) 34314 for { 34315 _ = v.Args[1] 34316 x := v.Args[0] 34317 v_1 := v.Args[1] 34318 if v_1.Op != OpAMD64MOVQconst { 34319 break 34320 } 34321 c := v_1.AuxInt 34322 v.reset(OpAMD64ROLWconst) 34323 v.AuxInt = c & 15 34324 v.AddArg(x) 34325 return true 34326 } 34327 // match: (ROLW x (MOVLconst [c])) 34328 // cond: 34329 // result: (ROLWconst [c&15] x) 34330 for { 34331 _ = v.Args[1] 34332 x := v.Args[0] 34333 v_1 := v.Args[1] 34334 if v_1.Op != OpAMD64MOVLconst { 34335 break 34336 } 34337 c := v_1.AuxInt 34338 v.reset(OpAMD64ROLWconst) 34339 v.AuxInt = c & 15 34340 v.AddArg(x) 34341 return true 34342 } 34343 return false 34344 } 34345 func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool { 34346 // match: (ROLWconst [c] (ROLWconst [d] x)) 34347 // cond: 34348 // result: (ROLWconst [(c+d)&15] x) 34349 for { 34350 c := v.AuxInt 34351 v_0 := v.Args[0] 34352 if v_0.Op != OpAMD64ROLWconst { 34353 break 34354 } 34355 d := v_0.AuxInt 34356 x := v_0.Args[0] 34357 v.reset(OpAMD64ROLWconst) 34358 v.AuxInt = (c + d) & 15 34359 v.AddArg(x) 34360 return true 34361 } 34362 // match: (ROLWconst x [0]) 34363 // cond: 34364 // result: x 34365 for { 34366 if v.AuxInt != 0 { 34367 break 34368 } 34369 x := v.Args[0] 34370 v.reset(OpCopy) 34371 v.Type = x.Type 34372 v.AddArg(x) 34373 return true 34374 } 34375 return false 34376 } 34377 func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool { 34378 // match: (RORB x (NEGQ y)) 34379 // cond: 34380 // result: (ROLB x y) 34381 for { 34382 _ = v.Args[1] 34383 x := v.Args[0] 34384 v_1 := v.Args[1] 34385 if v_1.Op != OpAMD64NEGQ { 34386 break 34387 } 34388 y := v_1.Args[0] 34389 v.reset(OpAMD64ROLB) 34390 v.AddArg(x) 34391 v.AddArg(y) 34392 return true 34393 } 34394 // match: (RORB x (NEGL y)) 34395 // cond: 34396 // result: (ROLB x y) 34397 for { 34398 _ 
= v.Args[1] 34399 x := v.Args[0] 34400 v_1 := v.Args[1] 34401 if v_1.Op != OpAMD64NEGL { 34402 break 34403 } 34404 y := v_1.Args[0] 34405 v.reset(OpAMD64ROLB) 34406 v.AddArg(x) 34407 v.AddArg(y) 34408 return true 34409 } 34410 // match: (RORB x (MOVQconst [c])) 34411 // cond: 34412 // result: (ROLBconst [(-c)&7 ] x) 34413 for { 34414 _ = v.Args[1] 34415 x := v.Args[0] 34416 v_1 := v.Args[1] 34417 if v_1.Op != OpAMD64MOVQconst { 34418 break 34419 } 34420 c := v_1.AuxInt 34421 v.reset(OpAMD64ROLBconst) 34422 v.AuxInt = (-c) & 7 34423 v.AddArg(x) 34424 return true 34425 } 34426 // match: (RORB x (MOVLconst [c])) 34427 // cond: 34428 // result: (ROLBconst [(-c)&7 ] x) 34429 for { 34430 _ = v.Args[1] 34431 x := v.Args[0] 34432 v_1 := v.Args[1] 34433 if v_1.Op != OpAMD64MOVLconst { 34434 break 34435 } 34436 c := v_1.AuxInt 34437 v.reset(OpAMD64ROLBconst) 34438 v.AuxInt = (-c) & 7 34439 v.AddArg(x) 34440 return true 34441 } 34442 return false 34443 } 34444 func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool { 34445 // match: (RORL x (NEGQ y)) 34446 // cond: 34447 // result: (ROLL x y) 34448 for { 34449 _ = v.Args[1] 34450 x := v.Args[0] 34451 v_1 := v.Args[1] 34452 if v_1.Op != OpAMD64NEGQ { 34453 break 34454 } 34455 y := v_1.Args[0] 34456 v.reset(OpAMD64ROLL) 34457 v.AddArg(x) 34458 v.AddArg(y) 34459 return true 34460 } 34461 // match: (RORL x (NEGL y)) 34462 // cond: 34463 // result: (ROLL x y) 34464 for { 34465 _ = v.Args[1] 34466 x := v.Args[0] 34467 v_1 := v.Args[1] 34468 if v_1.Op != OpAMD64NEGL { 34469 break 34470 } 34471 y := v_1.Args[0] 34472 v.reset(OpAMD64ROLL) 34473 v.AddArg(x) 34474 v.AddArg(y) 34475 return true 34476 } 34477 // match: (RORL x (MOVQconst [c])) 34478 // cond: 34479 // result: (ROLLconst [(-c)&31] x) 34480 for { 34481 _ = v.Args[1] 34482 x := v.Args[0] 34483 v_1 := v.Args[1] 34484 if v_1.Op != OpAMD64MOVQconst { 34485 break 34486 } 34487 c := v_1.AuxInt 34488 v.reset(OpAMD64ROLLconst) 34489 v.AuxInt = (-c) & 31 34490 v.AddArg(x) 34491 return true 34492 } 34493 // match: (RORL x (MOVLconst [c])) 34494 // cond: 34495 // result: (ROLLconst [(-c)&31] x) 34496 for { 34497 _ = v.Args[1] 34498 x := v.Args[0] 34499 v_1 := v.Args[1] 34500 if v_1.Op != OpAMD64MOVLconst { 34501 break 34502 } 34503 c := v_1.AuxInt 34504 v.reset(OpAMD64ROLLconst) 34505 v.AuxInt = (-c) & 31 34506 v.AddArg(x) 34507 return true 34508 } 34509 return false 34510 } 34511 func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool { 34512 // match: (RORQ x (NEGQ y)) 34513 // cond: 34514 // result: (ROLQ x y) 34515 for { 34516 _ = v.Args[1] 34517 x := v.Args[0] 34518 v_1 := v.Args[1] 34519 if v_1.Op != OpAMD64NEGQ { 34520 break 34521 } 34522 y := v_1.Args[0] 34523 v.reset(OpAMD64ROLQ) 34524 v.AddArg(x) 34525 v.AddArg(y) 34526 return true 34527 } 34528 // match: (RORQ x (NEGL y)) 34529 // cond: 34530 // result: (ROLQ x y) 34531 for { 34532 _ = v.Args[1] 34533 x := v.Args[0] 34534 v_1 := v.Args[1] 34535 if v_1.Op != OpAMD64NEGL { 34536 break 34537 } 34538 y := v_1.Args[0] 34539 v.reset(OpAMD64ROLQ) 34540 v.AddArg(x) 34541 v.AddArg(y) 34542 return true 34543 } 34544 // match: (RORQ x (MOVQconst [c])) 34545 // cond: 34546 // result: (ROLQconst [(-c)&63] x) 34547 for { 34548 _ = v.Args[1] 34549 x := v.Args[0] 34550 v_1 := v.Args[1] 34551 if v_1.Op != OpAMD64MOVQconst { 34552 break 34553 } 34554 c := v_1.AuxInt 34555 v.reset(OpAMD64ROLQconst) 34556 v.AuxInt = (-c) & 63 34557 v.AddArg(x) 34558 return true 34559 } 34560 // match: (RORQ x (MOVLconst [c])) 34561 // cond: 34562 // result: (ROLQconst [(-c)&63] x) 34563 for { 
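// Note: this match (and its MOVQconst twin above) normalizes a rotate
// right by a constant into a rotate left by the negated amount, masked
// to the operand width, so codegen only ever needs the ROL*const forms
// for constant rotates. Worked example: rotating a 64-bit value right
// by 8 equals rotating it left by 56, and (-8)&63 == 56.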
34564 _ = v.Args[1] 34565 x := v.Args[0] 34566 v_1 := v.Args[1] 34567 if v_1.Op != OpAMD64MOVLconst { 34568 break 34569 } 34570 c := v_1.AuxInt 34571 v.reset(OpAMD64ROLQconst) 34572 v.AuxInt = (-c) & 63 34573 v.AddArg(x) 34574 return true 34575 } 34576 return false 34577 } 34578 func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool { 34579 // match: (RORW x (NEGQ y)) 34580 // cond: 34581 // result: (ROLW x y) 34582 for { 34583 _ = v.Args[1] 34584 x := v.Args[0] 34585 v_1 := v.Args[1] 34586 if v_1.Op != OpAMD64NEGQ { 34587 break 34588 } 34589 y := v_1.Args[0] 34590 v.reset(OpAMD64ROLW) 34591 v.AddArg(x) 34592 v.AddArg(y) 34593 return true 34594 } 34595 // match: (RORW x (NEGL y)) 34596 // cond: 34597 // result: (ROLW x y) 34598 for { 34599 _ = v.Args[1] 34600 x := v.Args[0] 34601 v_1 := v.Args[1] 34602 if v_1.Op != OpAMD64NEGL { 34603 break 34604 } 34605 y := v_1.Args[0] 34606 v.reset(OpAMD64ROLW) 34607 v.AddArg(x) 34608 v.AddArg(y) 34609 return true 34610 } 34611 // match: (RORW x (MOVQconst [c])) 34612 // cond: 34613 // result: (ROLWconst [(-c)&15] x) 34614 for { 34615 _ = v.Args[1] 34616 x := v.Args[0] 34617 v_1 := v.Args[1] 34618 if v_1.Op != OpAMD64MOVQconst { 34619 break 34620 } 34621 c := v_1.AuxInt 34622 v.reset(OpAMD64ROLWconst) 34623 v.AuxInt = (-c) & 15 34624 v.AddArg(x) 34625 return true 34626 } 34627 // match: (RORW x (MOVLconst [c])) 34628 // cond: 34629 // result: (ROLWconst [(-c)&15] x) 34630 for { 34631 _ = v.Args[1] 34632 x := v.Args[0] 34633 v_1 := v.Args[1] 34634 if v_1.Op != OpAMD64MOVLconst { 34635 break 34636 } 34637 c := v_1.AuxInt 34638 v.reset(OpAMD64ROLWconst) 34639 v.AuxInt = (-c) & 15 34640 v.AddArg(x) 34641 return true 34642 } 34643 return false 34644 } 34645 func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool { 34646 // match: (SARB x (MOVQconst [c])) 34647 // cond: 34648 // result: (SARBconst [min(c&31,7)] x) 34649 for { 34650 _ = v.Args[1] 34651 x := v.Args[0] 34652 v_1 := v.Args[1] 34653 if v_1.Op != OpAMD64MOVQconst { 34654 break 34655 } 34656 c := v_1.AuxInt 34657 v.reset(OpAMD64SARBconst) 34658 v.AuxInt = min(c&31, 7) 34659 v.AddArg(x) 34660 return true 34661 } 34662 // match: (SARB x (MOVLconst [c])) 34663 // cond: 34664 // result: (SARBconst [min(c&31,7)] x) 34665 for { 34666 _ = v.Args[1] 34667 x := v.Args[0] 34668 v_1 := v.Args[1] 34669 if v_1.Op != OpAMD64MOVLconst { 34670 break 34671 } 34672 c := v_1.AuxInt 34673 v.reset(OpAMD64SARBconst) 34674 v.AuxInt = min(c&31, 7) 34675 v.AddArg(x) 34676 return true 34677 } 34678 return false 34679 } 34680 func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool { 34681 // match: (SARBconst x [0]) 34682 // cond: 34683 // result: x 34684 for { 34685 if v.AuxInt != 0 { 34686 break 34687 } 34688 x := v.Args[0] 34689 v.reset(OpCopy) 34690 v.Type = x.Type 34691 v.AddArg(x) 34692 return true 34693 } 34694 // match: (SARBconst [c] (MOVQconst [d])) 34695 // cond: 34696 // result: (MOVQconst [d>>uint64(c)]) 34697 for { 34698 c := v.AuxInt 34699 v_0 := v.Args[0] 34700 if v_0.Op != OpAMD64MOVQconst { 34701 break 34702 } 34703 d := v_0.AuxInt 34704 v.reset(OpAMD64MOVQconst) 34705 v.AuxInt = d >> uint64(c) 34706 return true 34707 } 34708 return false 34709 } 34710 func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { 34711 b := v.Block 34712 _ = b 34713 // match: (SARL x (MOVQconst [c])) 34714 // cond: 34715 // result: (SARLconst [c&31] x) 34716 for { 34717 _ = v.Args[1] 34718 x := v.Args[0] 34719 v_1 := v.Args[1] 34720 if v_1.Op != OpAMD64MOVQconst { 34721 break 34722 } 34723 c := v_1.AuxInt 34724 v.reset(OpAMD64SARLconst) 
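// Note: the count is masked before being stored in AuxInt because the
// x86 SAR instruction with a 32-bit operand only consults the low five
// bits of its count, so SARLconst [c&31] preserves the machine
// semantics of the matched SARL; e.g. a count of 33 shifts by 1.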
34725 v.AuxInt = c & 31 34726 v.AddArg(x) 34727 return true 34728 } 34729 // match: (SARL x (MOVLconst [c])) 34730 // cond: 34731 // result: (SARLconst [c&31] x) 34732 for { 34733 _ = v.Args[1] 34734 x := v.Args[0] 34735 v_1 := v.Args[1] 34736 if v_1.Op != OpAMD64MOVLconst { 34737 break 34738 } 34739 c := v_1.AuxInt 34740 v.reset(OpAMD64SARLconst) 34741 v.AuxInt = c & 31 34742 v.AddArg(x) 34743 return true 34744 } 34745 // match: (SARL x (ADDQconst [c] y)) 34746 // cond: c & 31 == 0 34747 // result: (SARL x y) 34748 for { 34749 _ = v.Args[1] 34750 x := v.Args[0] 34751 v_1 := v.Args[1] 34752 if v_1.Op != OpAMD64ADDQconst { 34753 break 34754 } 34755 c := v_1.AuxInt 34756 y := v_1.Args[0] 34757 if !(c&31 == 0) { 34758 break 34759 } 34760 v.reset(OpAMD64SARL) 34761 v.AddArg(x) 34762 v.AddArg(y) 34763 return true 34764 } 34765 // match: (SARL x (NEGQ <t> (ADDQconst [c] y))) 34766 // cond: c & 31 == 0 34767 // result: (SARL x (NEGQ <t> y)) 34768 for { 34769 _ = v.Args[1] 34770 x := v.Args[0] 34771 v_1 := v.Args[1] 34772 if v_1.Op != OpAMD64NEGQ { 34773 break 34774 } 34775 t := v_1.Type 34776 v_1_0 := v_1.Args[0] 34777 if v_1_0.Op != OpAMD64ADDQconst { 34778 break 34779 } 34780 c := v_1_0.AuxInt 34781 y := v_1_0.Args[0] 34782 if !(c&31 == 0) { 34783 break 34784 } 34785 v.reset(OpAMD64SARL) 34786 v.AddArg(x) 34787 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 34788 v0.AddArg(y) 34789 v.AddArg(v0) 34790 return true 34791 } 34792 // match: (SARL x (ANDQconst [c] y)) 34793 // cond: c & 31 == 31 34794 // result: (SARL x y) 34795 for { 34796 _ = v.Args[1] 34797 x := v.Args[0] 34798 v_1 := v.Args[1] 34799 if v_1.Op != OpAMD64ANDQconst { 34800 break 34801 } 34802 c := v_1.AuxInt 34803 y := v_1.Args[0] 34804 if !(c&31 == 31) { 34805 break 34806 } 34807 v.reset(OpAMD64SARL) 34808 v.AddArg(x) 34809 v.AddArg(y) 34810 return true 34811 } 34812 // match: (SARL x (NEGQ <t> (ANDQconst [c] y))) 34813 // cond: c & 31 == 31 34814 // result: (SARL x (NEGQ <t> y)) 34815 for { 34816 _ = v.Args[1] 34817 x := v.Args[0] 34818 v_1 := v.Args[1] 34819 if v_1.Op != OpAMD64NEGQ { 34820 break 34821 } 34822 t := v_1.Type 34823 v_1_0 := v_1.Args[0] 34824 if v_1_0.Op != OpAMD64ANDQconst { 34825 break 34826 } 34827 c := v_1_0.AuxInt 34828 y := v_1_0.Args[0] 34829 if !(c&31 == 31) { 34830 break 34831 } 34832 v.reset(OpAMD64SARL) 34833 v.AddArg(x) 34834 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 34835 v0.AddArg(y) 34836 v.AddArg(v0) 34837 return true 34838 } 34839 // match: (SARL x (ADDLconst [c] y)) 34840 // cond: c & 31 == 0 34841 // result: (SARL x y) 34842 for { 34843 _ = v.Args[1] 34844 x := v.Args[0] 34845 v_1 := v.Args[1] 34846 if v_1.Op != OpAMD64ADDLconst { 34847 break 34848 } 34849 c := v_1.AuxInt 34850 y := v_1.Args[0] 34851 if !(c&31 == 0) { 34852 break 34853 } 34854 v.reset(OpAMD64SARL) 34855 v.AddArg(x) 34856 v.AddArg(y) 34857 return true 34858 } 34859 // match: (SARL x (NEGL <t> (ADDLconst [c] y))) 34860 // cond: c & 31 == 0 34861 // result: (SARL x (NEGL <t> y)) 34862 for { 34863 _ = v.Args[1] 34864 x := v.Args[0] 34865 v_1 := v.Args[1] 34866 if v_1.Op != OpAMD64NEGL { 34867 break 34868 } 34869 t := v_1.Type 34870 v_1_0 := v_1.Args[0] 34871 if v_1_0.Op != OpAMD64ADDLconst { 34872 break 34873 } 34874 c := v_1_0.AuxInt 34875 y := v_1_0.Args[0] 34876 if !(c&31 == 0) { 34877 break 34878 } 34879 v.reset(OpAMD64SARL) 34880 v.AddArg(x) 34881 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 34882 v0.AddArg(y) 34883 v.AddArg(v0) 34884 return true 34885 } 34886 // match: (SARL x (ANDLconst [c] y)) 34887 // cond: c & 31 == 31 34888 // result: 
(SARL x y) 34889 for { 34890 _ = v.Args[1] 34891 x := v.Args[0] 34892 v_1 := v.Args[1] 34893 if v_1.Op != OpAMD64ANDLconst { 34894 break 34895 } 34896 c := v_1.AuxInt 34897 y := v_1.Args[0] 34898 if !(c&31 == 31) { 34899 break 34900 } 34901 v.reset(OpAMD64SARL) 34902 v.AddArg(x) 34903 v.AddArg(y) 34904 return true 34905 } 34906 // match: (SARL x (NEGL <t> (ANDLconst [c] y))) 34907 // cond: c & 31 == 31 34908 // result: (SARL x (NEGL <t> y)) 34909 for { 34910 _ = v.Args[1] 34911 x := v.Args[0] 34912 v_1 := v.Args[1] 34913 if v_1.Op != OpAMD64NEGL { 34914 break 34915 } 34916 t := v_1.Type 34917 v_1_0 := v_1.Args[0] 34918 if v_1_0.Op != OpAMD64ANDLconst { 34919 break 34920 } 34921 c := v_1_0.AuxInt 34922 y := v_1_0.Args[0] 34923 if !(c&31 == 31) { 34924 break 34925 } 34926 v.reset(OpAMD64SARL) 34927 v.AddArg(x) 34928 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 34929 v0.AddArg(y) 34930 v.AddArg(v0) 34931 return true 34932 } 34933 return false 34934 } 34935 func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool { 34936 // match: (SARLconst x [0]) 34937 // cond: 34938 // result: x 34939 for { 34940 if v.AuxInt != 0 { 34941 break 34942 } 34943 x := v.Args[0] 34944 v.reset(OpCopy) 34945 v.Type = x.Type 34946 v.AddArg(x) 34947 return true 34948 } 34949 // match: (SARLconst [c] (MOVQconst [d])) 34950 // cond: 34951 // result: (MOVQconst [d>>uint64(c)]) 34952 for { 34953 c := v.AuxInt 34954 v_0 := v.Args[0] 34955 if v_0.Op != OpAMD64MOVQconst { 34956 break 34957 } 34958 d := v_0.AuxInt 34959 v.reset(OpAMD64MOVQconst) 34960 v.AuxInt = d >> uint64(c) 34961 return true 34962 } 34963 return false 34964 } 34965 func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { 34966 b := v.Block 34967 _ = b 34968 // match: (SARQ x (MOVQconst [c])) 34969 // cond: 34970 // result: (SARQconst [c&63] x) 34971 for { 34972 _ = v.Args[1] 34973 x := v.Args[0] 34974 v_1 := v.Args[1] 34975 if v_1.Op != OpAMD64MOVQconst { 34976 break 34977 } 34978 c := v_1.AuxInt 34979 v.reset(OpAMD64SARQconst) 34980 v.AuxInt = c & 63 34981 v.AddArg(x) 34982 return true 34983 } 34984 // match: (SARQ x (MOVLconst [c])) 34985 // cond: 34986 // result: (SARQconst [c&63] x) 34987 for { 34988 _ = v.Args[1] 34989 x := v.Args[0] 34990 v_1 := v.Args[1] 34991 if v_1.Op != OpAMD64MOVLconst { 34992 break 34993 } 34994 c := v_1.AuxInt 34995 v.reset(OpAMD64SARQconst) 34996 v.AuxInt = c & 63 34997 v.AddArg(x) 34998 return true 34999 } 35000 // match: (SARQ x (ADDQconst [c] y)) 35001 // cond: c & 63 == 0 35002 // result: (SARQ x y) 35003 for { 35004 _ = v.Args[1] 35005 x := v.Args[0] 35006 v_1 := v.Args[1] 35007 if v_1.Op != OpAMD64ADDQconst { 35008 break 35009 } 35010 c := v_1.AuxInt 35011 y := v_1.Args[0] 35012 if !(c&63 == 0) { 35013 break 35014 } 35015 v.reset(OpAMD64SARQ) 35016 v.AddArg(x) 35017 v.AddArg(y) 35018 return true 35019 } 35020 // match: (SARQ x (NEGQ <t> (ADDQconst [c] y))) 35021 // cond: c & 63 == 0 35022 // result: (SARQ x (NEGQ <t> y)) 35023 for { 35024 _ = v.Args[1] 35025 x := v.Args[0] 35026 v_1 := v.Args[1] 35027 if v_1.Op != OpAMD64NEGQ { 35028 break 35029 } 35030 t := v_1.Type 35031 v_1_0 := v_1.Args[0] 35032 if v_1_0.Op != OpAMD64ADDQconst { 35033 break 35034 } 35035 c := v_1_0.AuxInt 35036 y := v_1_0.Args[0] 35037 if !(c&63 == 0) { 35038 break 35039 } 35040 v.reset(OpAMD64SARQ) 35041 v.AddArg(x) 35042 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35043 v0.AddArg(y) 35044 v.AddArg(v0) 35045 return true 35046 } 35047 // match: (SARQ x (ANDQconst [c] y)) 35048 // cond: c & 63 == 63 35049 // result: (SARQ x y) 35050 for { 35051 _ = v.Args[1] 
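// NOTE (editorial, not generated code): the ADDQconst/ADDLconst rules in
// SARL and SARQ drop the addition when the constant is a multiple of the
// shift width, since the hardware masks the count to 5 or 6 bits anyway.
// A hypothetical check of the 64-bit invariant:
//
//	func countMod64(x int64, y uint64) bool {
//		// Adding a multiple of 64 never changes the masked count.
//		return x>>(y&63) == x>>((y+64)&63)
//	}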
35052 x := v.Args[0] 35053 v_1 := v.Args[1] 35054 if v_1.Op != OpAMD64ANDQconst { 35055 break 35056 } 35057 c := v_1.AuxInt 35058 y := v_1.Args[0] 35059 if !(c&63 == 63) { 35060 break 35061 } 35062 v.reset(OpAMD64SARQ) 35063 v.AddArg(x) 35064 v.AddArg(y) 35065 return true 35066 } 35067 // match: (SARQ x (NEGQ <t> (ANDQconst [c] y))) 35068 // cond: c & 63 == 63 35069 // result: (SARQ x (NEGQ <t> y)) 35070 for { 35071 _ = v.Args[1] 35072 x := v.Args[0] 35073 v_1 := v.Args[1] 35074 if v_1.Op != OpAMD64NEGQ { 35075 break 35076 } 35077 t := v_1.Type 35078 v_1_0 := v_1.Args[0] 35079 if v_1_0.Op != OpAMD64ANDQconst { 35080 break 35081 } 35082 c := v_1_0.AuxInt 35083 y := v_1_0.Args[0] 35084 if !(c&63 == 63) { 35085 break 35086 } 35087 v.reset(OpAMD64SARQ) 35088 v.AddArg(x) 35089 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35090 v0.AddArg(y) 35091 v.AddArg(v0) 35092 return true 35093 } 35094 // match: (SARQ x (ADDLconst [c] y)) 35095 // cond: c & 63 == 0 35096 // result: (SARQ x y) 35097 for { 35098 _ = v.Args[1] 35099 x := v.Args[0] 35100 v_1 := v.Args[1] 35101 if v_1.Op != OpAMD64ADDLconst { 35102 break 35103 } 35104 c := v_1.AuxInt 35105 y := v_1.Args[0] 35106 if !(c&63 == 0) { 35107 break 35108 } 35109 v.reset(OpAMD64SARQ) 35110 v.AddArg(x) 35111 v.AddArg(y) 35112 return true 35113 } 35114 // match: (SARQ x (NEGL <t> (ADDLconst [c] y))) 35115 // cond: c & 63 == 0 35116 // result: (SARQ x (NEGL <t> y)) 35117 for { 35118 _ = v.Args[1] 35119 x := v.Args[0] 35120 v_1 := v.Args[1] 35121 if v_1.Op != OpAMD64NEGL { 35122 break 35123 } 35124 t := v_1.Type 35125 v_1_0 := v_1.Args[0] 35126 if v_1_0.Op != OpAMD64ADDLconst { 35127 break 35128 } 35129 c := v_1_0.AuxInt 35130 y := v_1_0.Args[0] 35131 if !(c&63 == 0) { 35132 break 35133 } 35134 v.reset(OpAMD64SARQ) 35135 v.AddArg(x) 35136 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35137 v0.AddArg(y) 35138 v.AddArg(v0) 35139 return true 35140 } 35141 // match: (SARQ x (ANDLconst [c] y)) 35142 // cond: c & 63 == 63 35143 // result: (SARQ x y) 35144 for { 35145 _ = v.Args[1] 35146 x := v.Args[0] 35147 v_1 := v.Args[1] 35148 if v_1.Op != OpAMD64ANDLconst { 35149 break 35150 } 35151 c := v_1.AuxInt 35152 y := v_1.Args[0] 35153 if !(c&63 == 63) { 35154 break 35155 } 35156 v.reset(OpAMD64SARQ) 35157 v.AddArg(x) 35158 v.AddArg(y) 35159 return true 35160 } 35161 // match: (SARQ x (NEGL <t> (ANDLconst [c] y))) 35162 // cond: c & 63 == 63 35163 // result: (SARQ x (NEGL <t> y)) 35164 for { 35165 _ = v.Args[1] 35166 x := v.Args[0] 35167 v_1 := v.Args[1] 35168 if v_1.Op != OpAMD64NEGL { 35169 break 35170 } 35171 t := v_1.Type 35172 v_1_0 := v_1.Args[0] 35173 if v_1_0.Op != OpAMD64ANDLconst { 35174 break 35175 } 35176 c := v_1_0.AuxInt 35177 y := v_1_0.Args[0] 35178 if !(c&63 == 63) { 35179 break 35180 } 35181 v.reset(OpAMD64SARQ) 35182 v.AddArg(x) 35183 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35184 v0.AddArg(y) 35185 v.AddArg(v0) 35186 return true 35187 } 35188 return false 35189 } 35190 func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool { 35191 // match: (SARQconst x [0]) 35192 // cond: 35193 // result: x 35194 for { 35195 if v.AuxInt != 0 { 35196 break 35197 } 35198 x := v.Args[0] 35199 v.reset(OpCopy) 35200 v.Type = x.Type 35201 v.AddArg(x) 35202 return true 35203 } 35204 // match: (SARQconst [c] (MOVQconst [d])) 35205 // cond: 35206 // result: (MOVQconst [d>>uint64(c)]) 35207 for { 35208 c := v.AuxInt 35209 v_0 := v.Args[0] 35210 if v_0.Op != OpAMD64MOVQconst { 35211 break 35212 } 35213 d := v_0.AuxInt 35214 v.reset(OpAMD64MOVQconst) 35215 v.AuxInt = d >> uint64(c) 
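// NOTE (editorial, not generated code): the (SARQconst [c] (MOVQconst [d]))
// fold just below computes d >> uint64(c) on the compiler's own int64,
// which is an arithmetic shift in Go and so matches SARQ's sign-filling
// run-time behavior:
//
//	var d int64 = -16
//	_ = d >> 2 // -4: the sign bit is replicated, not zero-filled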
35216 return true 35217 } 35218 return false 35219 } 35220 func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool { 35221 // match: (SARW x (MOVQconst [c])) 35222 // cond: 35223 // result: (SARWconst [min(c&31,15)] x) 35224 for { 35225 _ = v.Args[1] 35226 x := v.Args[0] 35227 v_1 := v.Args[1] 35228 if v_1.Op != OpAMD64MOVQconst { 35229 break 35230 } 35231 c := v_1.AuxInt 35232 v.reset(OpAMD64SARWconst) 35233 v.AuxInt = min(c&31, 15) 35234 v.AddArg(x) 35235 return true 35236 } 35237 // match: (SARW x (MOVLconst [c])) 35238 // cond: 35239 // result: (SARWconst [min(c&31,15)] x) 35240 for { 35241 _ = v.Args[1] 35242 x := v.Args[0] 35243 v_1 := v.Args[1] 35244 if v_1.Op != OpAMD64MOVLconst { 35245 break 35246 } 35247 c := v_1.AuxInt 35248 v.reset(OpAMD64SARWconst) 35249 v.AuxInt = min(c&31, 15) 35250 v.AddArg(x) 35251 return true 35252 } 35253 return false 35254 } 35255 func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool { 35256 // match: (SARWconst x [0]) 35257 // cond: 35258 // result: x 35259 for { 35260 if v.AuxInt != 0 { 35261 break 35262 } 35263 x := v.Args[0] 35264 v.reset(OpCopy) 35265 v.Type = x.Type 35266 v.AddArg(x) 35267 return true 35268 } 35269 // match: (SARWconst [c] (MOVQconst [d])) 35270 // cond: 35271 // result: (MOVQconst [d>>uint64(c)]) 35272 for { 35273 c := v.AuxInt 35274 v_0 := v.Args[0] 35275 if v_0.Op != OpAMD64MOVQconst { 35276 break 35277 } 35278 d := v_0.AuxInt 35279 v.reset(OpAMD64MOVQconst) 35280 v.AuxInt = d >> uint64(c) 35281 return true 35282 } 35283 return false 35284 } 35285 func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { 35286 // match: (SBBLcarrymask (FlagEQ)) 35287 // cond: 35288 // result: (MOVLconst [0]) 35289 for { 35290 v_0 := v.Args[0] 35291 if v_0.Op != OpAMD64FlagEQ { 35292 break 35293 } 35294 v.reset(OpAMD64MOVLconst) 35295 v.AuxInt = 0 35296 return true 35297 } 35298 // match: (SBBLcarrymask (FlagLT_ULT)) 35299 // cond: 35300 // result: (MOVLconst [-1]) 35301 for { 35302 v_0 := v.Args[0] 35303 if v_0.Op != OpAMD64FlagLT_ULT { 35304 break 35305 } 35306 v.reset(OpAMD64MOVLconst) 35307 v.AuxInt = -1 35308 return true 35309 } 35310 // match: (SBBLcarrymask (FlagLT_UGT)) 35311 // cond: 35312 // result: (MOVLconst [0]) 35313 for { 35314 v_0 := v.Args[0] 35315 if v_0.Op != OpAMD64FlagLT_UGT { 35316 break 35317 } 35318 v.reset(OpAMD64MOVLconst) 35319 v.AuxInt = 0 35320 return true 35321 } 35322 // match: (SBBLcarrymask (FlagGT_ULT)) 35323 // cond: 35324 // result: (MOVLconst [-1]) 35325 for { 35326 v_0 := v.Args[0] 35327 if v_0.Op != OpAMD64FlagGT_ULT { 35328 break 35329 } 35330 v.reset(OpAMD64MOVLconst) 35331 v.AuxInt = -1 35332 return true 35333 } 35334 // match: (SBBLcarrymask (FlagGT_UGT)) 35335 // cond: 35336 // result: (MOVLconst [0]) 35337 for { 35338 v_0 := v.Args[0] 35339 if v_0.Op != OpAMD64FlagGT_UGT { 35340 break 35341 } 35342 v.reset(OpAMD64MOVLconst) 35343 v.AuxInt = 0 35344 return true 35345 } 35346 return false 35347 } 35348 func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { 35349 // match: (SBBQcarrymask (FlagEQ)) 35350 // cond: 35351 // result: (MOVQconst [0]) 35352 for { 35353 v_0 := v.Args[0] 35354 if v_0.Op != OpAMD64FlagEQ { 35355 break 35356 } 35357 v.reset(OpAMD64MOVQconst) 35358 v.AuxInt = 0 35359 return true 35360 } 35361 // match: (SBBQcarrymask (FlagLT_ULT)) 35362 // cond: 35363 // result: (MOVQconst [-1]) 35364 for { 35365 v_0 := v.Args[0] 35366 if v_0.Op != OpAMD64FlagLT_ULT { 35367 break 35368 } 35369 v.reset(OpAMD64MOVQconst) 35370 v.AuxInt = -1 35371 return true 35372 } 35373 // match: 
(SBBQcarrymask (FlagLT_UGT)) 35374 // cond: 35375 // result: (MOVQconst [0]) 35376 for { 35377 v_0 := v.Args[0] 35378 if v_0.Op != OpAMD64FlagLT_UGT { 35379 break 35380 } 35381 v.reset(OpAMD64MOVQconst) 35382 v.AuxInt = 0 35383 return true 35384 } 35385 // match: (SBBQcarrymask (FlagGT_ULT)) 35386 // cond: 35387 // result: (MOVQconst [-1]) 35388 for { 35389 v_0 := v.Args[0] 35390 if v_0.Op != OpAMD64FlagGT_ULT { 35391 break 35392 } 35393 v.reset(OpAMD64MOVQconst) 35394 v.AuxInt = -1 35395 return true 35396 } 35397 // match: (SBBQcarrymask (FlagGT_UGT)) 35398 // cond: 35399 // result: (MOVQconst [0]) 35400 for { 35401 v_0 := v.Args[0] 35402 if v_0.Op != OpAMD64FlagGT_UGT { 35403 break 35404 } 35405 v.reset(OpAMD64MOVQconst) 35406 v.AuxInt = 0 35407 return true 35408 } 35409 return false 35410 } 35411 func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { 35412 // match: (SETA (InvertFlags x)) 35413 // cond: 35414 // result: (SETB x) 35415 for { 35416 v_0 := v.Args[0] 35417 if v_0.Op != OpAMD64InvertFlags { 35418 break 35419 } 35420 x := v_0.Args[0] 35421 v.reset(OpAMD64SETB) 35422 v.AddArg(x) 35423 return true 35424 } 35425 // match: (SETA (FlagEQ)) 35426 // cond: 35427 // result: (MOVLconst [0]) 35428 for { 35429 v_0 := v.Args[0] 35430 if v_0.Op != OpAMD64FlagEQ { 35431 break 35432 } 35433 v.reset(OpAMD64MOVLconst) 35434 v.AuxInt = 0 35435 return true 35436 } 35437 // match: (SETA (FlagLT_ULT)) 35438 // cond: 35439 // result: (MOVLconst [0]) 35440 for { 35441 v_0 := v.Args[0] 35442 if v_0.Op != OpAMD64FlagLT_ULT { 35443 break 35444 } 35445 v.reset(OpAMD64MOVLconst) 35446 v.AuxInt = 0 35447 return true 35448 } 35449 // match: (SETA (FlagLT_UGT)) 35450 // cond: 35451 // result: (MOVLconst [1]) 35452 for { 35453 v_0 := v.Args[0] 35454 if v_0.Op != OpAMD64FlagLT_UGT { 35455 break 35456 } 35457 v.reset(OpAMD64MOVLconst) 35458 v.AuxInt = 1 35459 return true 35460 } 35461 // match: (SETA (FlagGT_ULT)) 35462 // cond: 35463 // result: (MOVLconst [0]) 35464 for { 35465 v_0 := v.Args[0] 35466 if v_0.Op != OpAMD64FlagGT_ULT { 35467 break 35468 } 35469 v.reset(OpAMD64MOVLconst) 35470 v.AuxInt = 0 35471 return true 35472 } 35473 // match: (SETA (FlagGT_UGT)) 35474 // cond: 35475 // result: (MOVLconst [1]) 35476 for { 35477 v_0 := v.Args[0] 35478 if v_0.Op != OpAMD64FlagGT_UGT { 35479 break 35480 } 35481 v.reset(OpAMD64MOVLconst) 35482 v.AuxInt = 1 35483 return true 35484 } 35485 return false 35486 } 35487 func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { 35488 // match: (SETAE (InvertFlags x)) 35489 // cond: 35490 // result: (SETBE x) 35491 for { 35492 v_0 := v.Args[0] 35493 if v_0.Op != OpAMD64InvertFlags { 35494 break 35495 } 35496 x := v_0.Args[0] 35497 v.reset(OpAMD64SETBE) 35498 v.AddArg(x) 35499 return true 35500 } 35501 // match: (SETAE (FlagEQ)) 35502 // cond: 35503 // result: (MOVLconst [1]) 35504 for { 35505 v_0 := v.Args[0] 35506 if v_0.Op != OpAMD64FlagEQ { 35507 break 35508 } 35509 v.reset(OpAMD64MOVLconst) 35510 v.AuxInt = 1 35511 return true 35512 } 35513 // match: (SETAE (FlagLT_ULT)) 35514 // cond: 35515 // result: (MOVLconst [0]) 35516 for { 35517 v_0 := v.Args[0] 35518 if v_0.Op != OpAMD64FlagLT_ULT { 35519 break 35520 } 35521 v.reset(OpAMD64MOVLconst) 35522 v.AuxInt = 0 35523 return true 35524 } 35525 // match: (SETAE (FlagLT_UGT)) 35526 // cond: 35527 // result: (MOVLconst [1]) 35528 for { 35529 v_0 := v.Args[0] 35530 if v_0.Op != OpAMD64FlagLT_UGT { 35531 break 35532 } 35533 v.reset(OpAMD64MOVLconst) 35534 v.AuxInt = 1 35535 return true 35536 } 35537 // match: (SETAE 
(FlagGT_ULT)) 35538 // cond: 35539 // result: (MOVLconst [0]) 35540 for { 35541 v_0 := v.Args[0] 35542 if v_0.Op != OpAMD64FlagGT_ULT { 35543 break 35544 } 35545 v.reset(OpAMD64MOVLconst) 35546 v.AuxInt = 0 35547 return true 35548 } 35549 // match: (SETAE (FlagGT_UGT)) 35550 // cond: 35551 // result: (MOVLconst [1]) 35552 for { 35553 v_0 := v.Args[0] 35554 if v_0.Op != OpAMD64FlagGT_UGT { 35555 break 35556 } 35557 v.reset(OpAMD64MOVLconst) 35558 v.AuxInt = 1 35559 return true 35560 } 35561 return false 35562 } 35563 func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { 35564 // match: (SETB (InvertFlags x)) 35565 // cond: 35566 // result: (SETA x) 35567 for { 35568 v_0 := v.Args[0] 35569 if v_0.Op != OpAMD64InvertFlags { 35570 break 35571 } 35572 x := v_0.Args[0] 35573 v.reset(OpAMD64SETA) 35574 v.AddArg(x) 35575 return true 35576 } 35577 // match: (SETB (FlagEQ)) 35578 // cond: 35579 // result: (MOVLconst [0]) 35580 for { 35581 v_0 := v.Args[0] 35582 if v_0.Op != OpAMD64FlagEQ { 35583 break 35584 } 35585 v.reset(OpAMD64MOVLconst) 35586 v.AuxInt = 0 35587 return true 35588 } 35589 // match: (SETB (FlagLT_ULT)) 35590 // cond: 35591 // result: (MOVLconst [1]) 35592 for { 35593 v_0 := v.Args[0] 35594 if v_0.Op != OpAMD64FlagLT_ULT { 35595 break 35596 } 35597 v.reset(OpAMD64MOVLconst) 35598 v.AuxInt = 1 35599 return true 35600 } 35601 // match: (SETB (FlagLT_UGT)) 35602 // cond: 35603 // result: (MOVLconst [0]) 35604 for { 35605 v_0 := v.Args[0] 35606 if v_0.Op != OpAMD64FlagLT_UGT { 35607 break 35608 } 35609 v.reset(OpAMD64MOVLconst) 35610 v.AuxInt = 0 35611 return true 35612 } 35613 // match: (SETB (FlagGT_ULT)) 35614 // cond: 35615 // result: (MOVLconst [1]) 35616 for { 35617 v_0 := v.Args[0] 35618 if v_0.Op != OpAMD64FlagGT_ULT { 35619 break 35620 } 35621 v.reset(OpAMD64MOVLconst) 35622 v.AuxInt = 1 35623 return true 35624 } 35625 // match: (SETB (FlagGT_UGT)) 35626 // cond: 35627 // result: (MOVLconst [0]) 35628 for { 35629 v_0 := v.Args[0] 35630 if v_0.Op != OpAMD64FlagGT_UGT { 35631 break 35632 } 35633 v.reset(OpAMD64MOVLconst) 35634 v.AuxInt = 0 35635 return true 35636 } 35637 return false 35638 } 35639 func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { 35640 // match: (SETBE (InvertFlags x)) 35641 // cond: 35642 // result: (SETAE x) 35643 for { 35644 v_0 := v.Args[0] 35645 if v_0.Op != OpAMD64InvertFlags { 35646 break 35647 } 35648 x := v_0.Args[0] 35649 v.reset(OpAMD64SETAE) 35650 v.AddArg(x) 35651 return true 35652 } 35653 // match: (SETBE (FlagEQ)) 35654 // cond: 35655 // result: (MOVLconst [1]) 35656 for { 35657 v_0 := v.Args[0] 35658 if v_0.Op != OpAMD64FlagEQ { 35659 break 35660 } 35661 v.reset(OpAMD64MOVLconst) 35662 v.AuxInt = 1 35663 return true 35664 } 35665 // match: (SETBE (FlagLT_ULT)) 35666 // cond: 35667 // result: (MOVLconst [1]) 35668 for { 35669 v_0 := v.Args[0] 35670 if v_0.Op != OpAMD64FlagLT_ULT { 35671 break 35672 } 35673 v.reset(OpAMD64MOVLconst) 35674 v.AuxInt = 1 35675 return true 35676 } 35677 // match: (SETBE (FlagLT_UGT)) 35678 // cond: 35679 // result: (MOVLconst [0]) 35680 for { 35681 v_0 := v.Args[0] 35682 if v_0.Op != OpAMD64FlagLT_UGT { 35683 break 35684 } 35685 v.reset(OpAMD64MOVLconst) 35686 v.AuxInt = 0 35687 return true 35688 } 35689 // match: (SETBE (FlagGT_ULT)) 35690 // cond: 35691 // result: (MOVLconst [1]) 35692 for { 35693 v_0 := v.Args[0] 35694 if v_0.Op != OpAMD64FlagGT_ULT { 35695 break 35696 } 35697 v.reset(OpAMD64MOVLconst) 35698 v.AuxInt = 1 35699 return true 35700 } 35701 // match: (SETBE (FlagGT_UGT)) 35702 // cond: 35703 // 
result: (MOVLconst [0]) 35704 for { 35705 v_0 := v.Args[0] 35706 if v_0.Op != OpAMD64FlagGT_UGT { 35707 break 35708 } 35709 v.reset(OpAMD64MOVLconst) 35710 v.AuxInt = 0 35711 return true 35712 } 35713 return false 35714 } 35715 func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { 35716 b := v.Block 35717 _ = b 35718 config := b.Func.Config 35719 _ = config 35720 // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) 35721 // cond: !config.nacl 35722 // result: (SETAE (BTL x y)) 35723 for { 35724 v_0 := v.Args[0] 35725 if v_0.Op != OpAMD64TESTL { 35726 break 35727 } 35728 _ = v_0.Args[1] 35729 v_0_0 := v_0.Args[0] 35730 if v_0_0.Op != OpAMD64SHLL { 35731 break 35732 } 35733 _ = v_0_0.Args[1] 35734 v_0_0_0 := v_0_0.Args[0] 35735 if v_0_0_0.Op != OpAMD64MOVLconst { 35736 break 35737 } 35738 if v_0_0_0.AuxInt != 1 { 35739 break 35740 } 35741 x := v_0_0.Args[1] 35742 y := v_0.Args[1] 35743 if !(!config.nacl) { 35744 break 35745 } 35746 v.reset(OpAMD64SETAE) 35747 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 35748 v0.AddArg(x) 35749 v0.AddArg(y) 35750 v.AddArg(v0) 35751 return true 35752 } 35753 // match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x))) 35754 // cond: !config.nacl 35755 // result: (SETAE (BTL x y)) 35756 for { 35757 v_0 := v.Args[0] 35758 if v_0.Op != OpAMD64TESTL { 35759 break 35760 } 35761 _ = v_0.Args[1] 35762 y := v_0.Args[0] 35763 v_0_1 := v_0.Args[1] 35764 if v_0_1.Op != OpAMD64SHLL { 35765 break 35766 } 35767 _ = v_0_1.Args[1] 35768 v_0_1_0 := v_0_1.Args[0] 35769 if v_0_1_0.Op != OpAMD64MOVLconst { 35770 break 35771 } 35772 if v_0_1_0.AuxInt != 1 { 35773 break 35774 } 35775 x := v_0_1.Args[1] 35776 if !(!config.nacl) { 35777 break 35778 } 35779 v.reset(OpAMD64SETAE) 35780 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 35781 v0.AddArg(x) 35782 v0.AddArg(y) 35783 v.AddArg(v0) 35784 return true 35785 } 35786 // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 35787 // cond: !config.nacl 35788 // result: (SETAE (BTQ x y)) 35789 for { 35790 v_0 := v.Args[0] 35791 if v_0.Op != OpAMD64TESTQ { 35792 break 35793 } 35794 _ = v_0.Args[1] 35795 v_0_0 := v_0.Args[0] 35796 if v_0_0.Op != OpAMD64SHLQ { 35797 break 35798 } 35799 _ = v_0_0.Args[1] 35800 v_0_0_0 := v_0_0.Args[0] 35801 if v_0_0_0.Op != OpAMD64MOVQconst { 35802 break 35803 } 35804 if v_0_0_0.AuxInt != 1 { 35805 break 35806 } 35807 x := v_0_0.Args[1] 35808 y := v_0.Args[1] 35809 if !(!config.nacl) { 35810 break 35811 } 35812 v.reset(OpAMD64SETAE) 35813 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 35814 v0.AddArg(x) 35815 v0.AddArg(y) 35816 v.AddArg(v0) 35817 return true 35818 } 35819 // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 35820 // cond: !config.nacl 35821 // result: (SETAE (BTQ x y)) 35822 for { 35823 v_0 := v.Args[0] 35824 if v_0.Op != OpAMD64TESTQ { 35825 break 35826 } 35827 _ = v_0.Args[1] 35828 y := v_0.Args[0] 35829 v_0_1 := v_0.Args[1] 35830 if v_0_1.Op != OpAMD64SHLQ { 35831 break 35832 } 35833 _ = v_0_1.Args[1] 35834 v_0_1_0 := v_0_1.Args[0] 35835 if v_0_1_0.Op != OpAMD64MOVQconst { 35836 break 35837 } 35838 if v_0_1_0.AuxInt != 1 { 35839 break 35840 } 35841 x := v_0_1.Args[1] 35842 if !(!config.nacl) { 35843 break 35844 } 35845 v.reset(OpAMD64SETAE) 35846 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 35847 v0.AddArg(x) 35848 v0.AddArg(y) 35849 v.AddArg(v0) 35850 return true 35851 } 35852 // match: (SETEQ (TESTLconst [c] x)) 35853 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 35854 // result: (SETAE (BTLconst [log2(c)] x)) 35855 for { 35856 v_0 := v.Args[0] 35857 if 
v_0.Op != OpAMD64TESTLconst { 35858 break 35859 } 35860 c := v_0.AuxInt 35861 x := v_0.Args[0] 35862 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 35863 break 35864 } 35865 v.reset(OpAMD64SETAE) 35866 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 35867 v0.AuxInt = log2(c) 35868 v0.AddArg(x) 35869 v.AddArg(v0) 35870 return true 35871 } 35872 // match: (SETEQ (TESTQconst [c] x)) 35873 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 35874 // result: (SETAE (BTQconst [log2(c)] x)) 35875 for { 35876 v_0 := v.Args[0] 35877 if v_0.Op != OpAMD64TESTQconst { 35878 break 35879 } 35880 c := v_0.AuxInt 35881 x := v_0.Args[0] 35882 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 35883 break 35884 } 35885 v.reset(OpAMD64SETAE) 35886 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 35887 v0.AuxInt = log2(c) 35888 v0.AddArg(x) 35889 v.AddArg(v0) 35890 return true 35891 } 35892 // match: (SETEQ (TESTQ (MOVQconst [c]) x)) 35893 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 35894 // result: (SETAE (BTQconst [log2(c)] x)) 35895 for { 35896 v_0 := v.Args[0] 35897 if v_0.Op != OpAMD64TESTQ { 35898 break 35899 } 35900 _ = v_0.Args[1] 35901 v_0_0 := v_0.Args[0] 35902 if v_0_0.Op != OpAMD64MOVQconst { 35903 break 35904 } 35905 c := v_0_0.AuxInt 35906 x := v_0.Args[1] 35907 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 35908 break 35909 } 35910 v.reset(OpAMD64SETAE) 35911 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 35912 v0.AuxInt = log2(c) 35913 v0.AddArg(x) 35914 v.AddArg(v0) 35915 return true 35916 } 35917 // match: (SETEQ (TESTQ x (MOVQconst [c]))) 35918 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 35919 // result: (SETAE (BTQconst [log2(c)] x)) 35920 for { 35921 v_0 := v.Args[0] 35922 if v_0.Op != OpAMD64TESTQ { 35923 break 35924 } 35925 _ = v_0.Args[1] 35926 x := v_0.Args[0] 35927 v_0_1 := v_0.Args[1] 35928 if v_0_1.Op != OpAMD64MOVQconst { 35929 break 35930 } 35931 c := v_0_1.AuxInt 35932 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 35933 break 35934 } 35935 v.reset(OpAMD64SETAE) 35936 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 35937 v0.AuxInt = log2(c) 35938 v0.AddArg(x) 35939 v.AddArg(v0) 35940 return true 35941 } 35942 // match: (SETEQ (InvertFlags x)) 35943 // cond: 35944 // result: (SETEQ x) 35945 for { 35946 v_0 := v.Args[0] 35947 if v_0.Op != OpAMD64InvertFlags { 35948 break 35949 } 35950 x := v_0.Args[0] 35951 v.reset(OpAMD64SETEQ) 35952 v.AddArg(x) 35953 return true 35954 } 35955 // match: (SETEQ (FlagEQ)) 35956 // cond: 35957 // result: (MOVLconst [1]) 35958 for { 35959 v_0 := v.Args[0] 35960 if v_0.Op != OpAMD64FlagEQ { 35961 break 35962 } 35963 v.reset(OpAMD64MOVLconst) 35964 v.AuxInt = 1 35965 return true 35966 } 35967 return false 35968 } 35969 func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { 35970 // match: (SETEQ (FlagLT_ULT)) 35971 // cond: 35972 // result: (MOVLconst [0]) 35973 for { 35974 v_0 := v.Args[0] 35975 if v_0.Op != OpAMD64FlagLT_ULT { 35976 break 35977 } 35978 v.reset(OpAMD64MOVLconst) 35979 v.AuxInt = 0 35980 return true 35981 } 35982 // match: (SETEQ (FlagLT_UGT)) 35983 // cond: 35984 // result: (MOVLconst [0]) 35985 for { 35986 v_0 := v.Args[0] 35987 if v_0.Op != OpAMD64FlagLT_UGT { 35988 break 35989 } 35990 v.reset(OpAMD64MOVLconst) 35991 v.AuxInt = 0 35992 return true 35993 } 35994 // match: (SETEQ (FlagGT_ULT)) 35995 // cond: 35996 // result: (MOVLconst [0]) 35997 for { 35998 v_0 := v.Args[0] 35999 if v_0.Op != OpAMD64FlagGT_ULT { 36000 break 
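// NOTE (editorial, not generated code): the SETEQ rules above turn a TEST
// against a single bit (a SHLL/SHLQ of 1, or a power-of-two constant) into
// a BT bit test, reading the answer from the carry flag via SETAE. Both
// forms ask the same question; a hypothetical equivalence check:
//
//	func testVsBT(x int64, i uint) bool {
//		mask := int64(1) << i
//		return (x&mask == 0) == ((x>>i)&1 == 0) // always true
//	}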
36001 } 36002 v.reset(OpAMD64MOVLconst) 36003 v.AuxInt = 0 36004 return true 36005 } 36006 // match: (SETEQ (FlagGT_UGT)) 36007 // cond: 36008 // result: (MOVLconst [0]) 36009 for { 36010 v_0 := v.Args[0] 36011 if v_0.Op != OpAMD64FlagGT_UGT { 36012 break 36013 } 36014 v.reset(OpAMD64MOVLconst) 36015 v.AuxInt = 0 36016 return true 36017 } 36018 return false 36019 } 36020 func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { 36021 // match: (SETG (InvertFlags x)) 36022 // cond: 36023 // result: (SETL x) 36024 for { 36025 v_0 := v.Args[0] 36026 if v_0.Op != OpAMD64InvertFlags { 36027 break 36028 } 36029 x := v_0.Args[0] 36030 v.reset(OpAMD64SETL) 36031 v.AddArg(x) 36032 return true 36033 } 36034 // match: (SETG (FlagEQ)) 36035 // cond: 36036 // result: (MOVLconst [0]) 36037 for { 36038 v_0 := v.Args[0] 36039 if v_0.Op != OpAMD64FlagEQ { 36040 break 36041 } 36042 v.reset(OpAMD64MOVLconst) 36043 v.AuxInt = 0 36044 return true 36045 } 36046 // match: (SETG (FlagLT_ULT)) 36047 // cond: 36048 // result: (MOVLconst [0]) 36049 for { 36050 v_0 := v.Args[0] 36051 if v_0.Op != OpAMD64FlagLT_ULT { 36052 break 36053 } 36054 v.reset(OpAMD64MOVLconst) 36055 v.AuxInt = 0 36056 return true 36057 } 36058 // match: (SETG (FlagLT_UGT)) 36059 // cond: 36060 // result: (MOVLconst [0]) 36061 for { 36062 v_0 := v.Args[0] 36063 if v_0.Op != OpAMD64FlagLT_UGT { 36064 break 36065 } 36066 v.reset(OpAMD64MOVLconst) 36067 v.AuxInt = 0 36068 return true 36069 } 36070 // match: (SETG (FlagGT_ULT)) 36071 // cond: 36072 // result: (MOVLconst [1]) 36073 for { 36074 v_0 := v.Args[0] 36075 if v_0.Op != OpAMD64FlagGT_ULT { 36076 break 36077 } 36078 v.reset(OpAMD64MOVLconst) 36079 v.AuxInt = 1 36080 return true 36081 } 36082 // match: (SETG (FlagGT_UGT)) 36083 // cond: 36084 // result: (MOVLconst [1]) 36085 for { 36086 v_0 := v.Args[0] 36087 if v_0.Op != OpAMD64FlagGT_UGT { 36088 break 36089 } 36090 v.reset(OpAMD64MOVLconst) 36091 v.AuxInt = 1 36092 return true 36093 } 36094 return false 36095 } 36096 func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { 36097 // match: (SETGE (InvertFlags x)) 36098 // cond: 36099 // result: (SETLE x) 36100 for { 36101 v_0 := v.Args[0] 36102 if v_0.Op != OpAMD64InvertFlags { 36103 break 36104 } 36105 x := v_0.Args[0] 36106 v.reset(OpAMD64SETLE) 36107 v.AddArg(x) 36108 return true 36109 } 36110 // match: (SETGE (FlagEQ)) 36111 // cond: 36112 // result: (MOVLconst [1]) 36113 for { 36114 v_0 := v.Args[0] 36115 if v_0.Op != OpAMD64FlagEQ { 36116 break 36117 } 36118 v.reset(OpAMD64MOVLconst) 36119 v.AuxInt = 1 36120 return true 36121 } 36122 // match: (SETGE (FlagLT_ULT)) 36123 // cond: 36124 // result: (MOVLconst [0]) 36125 for { 36126 v_0 := v.Args[0] 36127 if v_0.Op != OpAMD64FlagLT_ULT { 36128 break 36129 } 36130 v.reset(OpAMD64MOVLconst) 36131 v.AuxInt = 0 36132 return true 36133 } 36134 // match: (SETGE (FlagLT_UGT)) 36135 // cond: 36136 // result: (MOVLconst [0]) 36137 for { 36138 v_0 := v.Args[0] 36139 if v_0.Op != OpAMD64FlagLT_UGT { 36140 break 36141 } 36142 v.reset(OpAMD64MOVLconst) 36143 v.AuxInt = 0 36144 return true 36145 } 36146 // match: (SETGE (FlagGT_ULT)) 36147 // cond: 36148 // result: (MOVLconst [1]) 36149 for { 36150 v_0 := v.Args[0] 36151 if v_0.Op != OpAMD64FlagGT_ULT { 36152 break 36153 } 36154 v.reset(OpAMD64MOVLconst) 36155 v.AuxInt = 1 36156 return true 36157 } 36158 // match: (SETGE (FlagGT_UGT)) 36159 // cond: 36160 // result: (MOVLconst [1]) 36161 for { 36162 v_0 := v.Args[0] 36163 if v_0.Op != OpAMD64FlagGT_UGT { 36164 break 36165 } 36166 v.reset(OpAMD64MOVLconst) 
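// NOTE (editorial, not generated code): InvertFlags marks flags produced
// with swapped comparison operands, so each SETcc rewrites to its mirrored
// condition (SETL <-> SETG, SETLE <-> SETGE, SETB <-> SETA). The identity
// being exploited, sketched hypothetically:
//
//	func mirrored(x, y int32) bool {
//		return (x < y) == (y > x) // always true; likewise for <= and >=
//	}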
36167 v.AuxInt = 1 36168 return true 36169 } 36170 return false 36171 } 36172 func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { 36173 // match: (SETL (InvertFlags x)) 36174 // cond: 36175 // result: (SETG x) 36176 for { 36177 v_0 := v.Args[0] 36178 if v_0.Op != OpAMD64InvertFlags { 36179 break 36180 } 36181 x := v_0.Args[0] 36182 v.reset(OpAMD64SETG) 36183 v.AddArg(x) 36184 return true 36185 } 36186 // match: (SETL (FlagEQ)) 36187 // cond: 36188 // result: (MOVLconst [0]) 36189 for { 36190 v_0 := v.Args[0] 36191 if v_0.Op != OpAMD64FlagEQ { 36192 break 36193 } 36194 v.reset(OpAMD64MOVLconst) 36195 v.AuxInt = 0 36196 return true 36197 } 36198 // match: (SETL (FlagLT_ULT)) 36199 // cond: 36200 // result: (MOVLconst [1]) 36201 for { 36202 v_0 := v.Args[0] 36203 if v_0.Op != OpAMD64FlagLT_ULT { 36204 break 36205 } 36206 v.reset(OpAMD64MOVLconst) 36207 v.AuxInt = 1 36208 return true 36209 } 36210 // match: (SETL (FlagLT_UGT)) 36211 // cond: 36212 // result: (MOVLconst [1]) 36213 for { 36214 v_0 := v.Args[0] 36215 if v_0.Op != OpAMD64FlagLT_UGT { 36216 break 36217 } 36218 v.reset(OpAMD64MOVLconst) 36219 v.AuxInt = 1 36220 return true 36221 } 36222 // match: (SETL (FlagGT_ULT)) 36223 // cond: 36224 // result: (MOVLconst [0]) 36225 for { 36226 v_0 := v.Args[0] 36227 if v_0.Op != OpAMD64FlagGT_ULT { 36228 break 36229 } 36230 v.reset(OpAMD64MOVLconst) 36231 v.AuxInt = 0 36232 return true 36233 } 36234 // match: (SETL (FlagGT_UGT)) 36235 // cond: 36236 // result: (MOVLconst [0]) 36237 for { 36238 v_0 := v.Args[0] 36239 if v_0.Op != OpAMD64FlagGT_UGT { 36240 break 36241 } 36242 v.reset(OpAMD64MOVLconst) 36243 v.AuxInt = 0 36244 return true 36245 } 36246 return false 36247 } 36248 func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { 36249 // match: (SETLE (InvertFlags x)) 36250 // cond: 36251 // result: (SETGE x) 36252 for { 36253 v_0 := v.Args[0] 36254 if v_0.Op != OpAMD64InvertFlags { 36255 break 36256 } 36257 x := v_0.Args[0] 36258 v.reset(OpAMD64SETGE) 36259 v.AddArg(x) 36260 return true 36261 } 36262 // match: (SETLE (FlagEQ)) 36263 // cond: 36264 // result: (MOVLconst [1]) 36265 for { 36266 v_0 := v.Args[0] 36267 if v_0.Op != OpAMD64FlagEQ { 36268 break 36269 } 36270 v.reset(OpAMD64MOVLconst) 36271 v.AuxInt = 1 36272 return true 36273 } 36274 // match: (SETLE (FlagLT_ULT)) 36275 // cond: 36276 // result: (MOVLconst [1]) 36277 for { 36278 v_0 := v.Args[0] 36279 if v_0.Op != OpAMD64FlagLT_ULT { 36280 break 36281 } 36282 v.reset(OpAMD64MOVLconst) 36283 v.AuxInt = 1 36284 return true 36285 } 36286 // match: (SETLE (FlagLT_UGT)) 36287 // cond: 36288 // result: (MOVLconst [1]) 36289 for { 36290 v_0 := v.Args[0] 36291 if v_0.Op != OpAMD64FlagLT_UGT { 36292 break 36293 } 36294 v.reset(OpAMD64MOVLconst) 36295 v.AuxInt = 1 36296 return true 36297 } 36298 // match: (SETLE (FlagGT_ULT)) 36299 // cond: 36300 // result: (MOVLconst [0]) 36301 for { 36302 v_0 := v.Args[0] 36303 if v_0.Op != OpAMD64FlagGT_ULT { 36304 break 36305 } 36306 v.reset(OpAMD64MOVLconst) 36307 v.AuxInt = 0 36308 return true 36309 } 36310 // match: (SETLE (FlagGT_UGT)) 36311 // cond: 36312 // result: (MOVLconst [0]) 36313 for { 36314 v_0 := v.Args[0] 36315 if v_0.Op != OpAMD64FlagGT_UGT { 36316 break 36317 } 36318 v.reset(OpAMD64MOVLconst) 36319 v.AuxInt = 0 36320 return true 36321 } 36322 return false 36323 } 36324 func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 36325 b := v.Block 36326 _ = b 36327 config := b.Func.Config 36328 _ = config 36329 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 36330 // cond: !config.nacl 
36331 // result: (SETB (BTL x y)) 36332 for { 36333 v_0 := v.Args[0] 36334 if v_0.Op != OpAMD64TESTL { 36335 break 36336 } 36337 _ = v_0.Args[1] 36338 v_0_0 := v_0.Args[0] 36339 if v_0_0.Op != OpAMD64SHLL { 36340 break 36341 } 36342 _ = v_0_0.Args[1] 36343 v_0_0_0 := v_0_0.Args[0] 36344 if v_0_0_0.Op != OpAMD64MOVLconst { 36345 break 36346 } 36347 if v_0_0_0.AuxInt != 1 { 36348 break 36349 } 36350 x := v_0_0.Args[1] 36351 y := v_0.Args[1] 36352 if !(!config.nacl) { 36353 break 36354 } 36355 v.reset(OpAMD64SETB) 36356 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 36357 v0.AddArg(x) 36358 v0.AddArg(y) 36359 v.AddArg(v0) 36360 return true 36361 } 36362 // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) 36363 // cond: !config.nacl 36364 // result: (SETB (BTL x y)) 36365 for { 36366 v_0 := v.Args[0] 36367 if v_0.Op != OpAMD64TESTL { 36368 break 36369 } 36370 _ = v_0.Args[1] 36371 y := v_0.Args[0] 36372 v_0_1 := v_0.Args[1] 36373 if v_0_1.Op != OpAMD64SHLL { 36374 break 36375 } 36376 _ = v_0_1.Args[1] 36377 v_0_1_0 := v_0_1.Args[0] 36378 if v_0_1_0.Op != OpAMD64MOVLconst { 36379 break 36380 } 36381 if v_0_1_0.AuxInt != 1 { 36382 break 36383 } 36384 x := v_0_1.Args[1] 36385 if !(!config.nacl) { 36386 break 36387 } 36388 v.reset(OpAMD64SETB) 36389 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 36390 v0.AddArg(x) 36391 v0.AddArg(y) 36392 v.AddArg(v0) 36393 return true 36394 } 36395 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 36396 // cond: !config.nacl 36397 // result: (SETB (BTQ x y)) 36398 for { 36399 v_0 := v.Args[0] 36400 if v_0.Op != OpAMD64TESTQ { 36401 break 36402 } 36403 _ = v_0.Args[1] 36404 v_0_0 := v_0.Args[0] 36405 if v_0_0.Op != OpAMD64SHLQ { 36406 break 36407 } 36408 _ = v_0_0.Args[1] 36409 v_0_0_0 := v_0_0.Args[0] 36410 if v_0_0_0.Op != OpAMD64MOVQconst { 36411 break 36412 } 36413 if v_0_0_0.AuxInt != 1 { 36414 break 36415 } 36416 x := v_0_0.Args[1] 36417 y := v_0.Args[1] 36418 if !(!config.nacl) { 36419 break 36420 } 36421 v.reset(OpAMD64SETB) 36422 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 36423 v0.AddArg(x) 36424 v0.AddArg(y) 36425 v.AddArg(v0) 36426 return true 36427 } 36428 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) 36429 // cond: !config.nacl 36430 // result: (SETB (BTQ x y)) 36431 for { 36432 v_0 := v.Args[0] 36433 if v_0.Op != OpAMD64TESTQ { 36434 break 36435 } 36436 _ = v_0.Args[1] 36437 y := v_0.Args[0] 36438 v_0_1 := v_0.Args[1] 36439 if v_0_1.Op != OpAMD64SHLQ { 36440 break 36441 } 36442 _ = v_0_1.Args[1] 36443 v_0_1_0 := v_0_1.Args[0] 36444 if v_0_1_0.Op != OpAMD64MOVQconst { 36445 break 36446 } 36447 if v_0_1_0.AuxInt != 1 { 36448 break 36449 } 36450 x := v_0_1.Args[1] 36451 if !(!config.nacl) { 36452 break 36453 } 36454 v.reset(OpAMD64SETB) 36455 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 36456 v0.AddArg(x) 36457 v0.AddArg(y) 36458 v.AddArg(v0) 36459 return true 36460 } 36461 // match: (SETNE (TESTLconst [c] x)) 36462 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 36463 // result: (SETB (BTLconst [log2(c)] x)) 36464 for { 36465 v_0 := v.Args[0] 36466 if v_0.Op != OpAMD64TESTLconst { 36467 break 36468 } 36469 c := v_0.AuxInt 36470 x := v_0.Args[0] 36471 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 36472 break 36473 } 36474 v.reset(OpAMD64SETB) 36475 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 36476 v0.AuxInt = log2(c) 36477 v0.AddArg(x) 36478 v.AddArg(v0) 36479 return true 36480 } 36481 // match: (SETNE (TESTQconst [c] x)) 36482 // cond: isPowerOfTwo(c) && log2(c) < 64 && 
!config.nacl 36483 // result: (SETB (BTQconst [log2(c)] x)) 36484 for { 36485 v_0 := v.Args[0] 36486 if v_0.Op != OpAMD64TESTQconst { 36487 break 36488 } 36489 c := v_0.AuxInt 36490 x := v_0.Args[0] 36491 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 36492 break 36493 } 36494 v.reset(OpAMD64SETB) 36495 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 36496 v0.AuxInt = log2(c) 36497 v0.AddArg(x) 36498 v.AddArg(v0) 36499 return true 36500 } 36501 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 36502 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 36503 // result: (SETB (BTQconst [log2(c)] x)) 36504 for { 36505 v_0 := v.Args[0] 36506 if v_0.Op != OpAMD64TESTQ { 36507 break 36508 } 36509 _ = v_0.Args[1] 36510 v_0_0 := v_0.Args[0] 36511 if v_0_0.Op != OpAMD64MOVQconst { 36512 break 36513 } 36514 c := v_0_0.AuxInt 36515 x := v_0.Args[1] 36516 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 36517 break 36518 } 36519 v.reset(OpAMD64SETB) 36520 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 36521 v0.AuxInt = log2(c) 36522 v0.AddArg(x) 36523 v.AddArg(v0) 36524 return true 36525 } 36526 // match: (SETNE (TESTQ x (MOVQconst [c]))) 36527 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 36528 // result: (SETB (BTQconst [log2(c)] x)) 36529 for { 36530 v_0 := v.Args[0] 36531 if v_0.Op != OpAMD64TESTQ { 36532 break 36533 } 36534 _ = v_0.Args[1] 36535 x := v_0.Args[0] 36536 v_0_1 := v_0.Args[1] 36537 if v_0_1.Op != OpAMD64MOVQconst { 36538 break 36539 } 36540 c := v_0_1.AuxInt 36541 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 36542 break 36543 } 36544 v.reset(OpAMD64SETB) 36545 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 36546 v0.AuxInt = log2(c) 36547 v0.AddArg(x) 36548 v.AddArg(v0) 36549 return true 36550 } 36551 // match: (SETNE (InvertFlags x)) 36552 // cond: 36553 // result: (SETNE x) 36554 for { 36555 v_0 := v.Args[0] 36556 if v_0.Op != OpAMD64InvertFlags { 36557 break 36558 } 36559 x := v_0.Args[0] 36560 v.reset(OpAMD64SETNE) 36561 v.AddArg(x) 36562 return true 36563 } 36564 // match: (SETNE (FlagEQ)) 36565 // cond: 36566 // result: (MOVLconst [0]) 36567 for { 36568 v_0 := v.Args[0] 36569 if v_0.Op != OpAMD64FlagEQ { 36570 break 36571 } 36572 v.reset(OpAMD64MOVLconst) 36573 v.AuxInt = 0 36574 return true 36575 } 36576 return false 36577 } 36578 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { 36579 // match: (SETNE (FlagLT_ULT)) 36580 // cond: 36581 // result: (MOVLconst [1]) 36582 for { 36583 v_0 := v.Args[0] 36584 if v_0.Op != OpAMD64FlagLT_ULT { 36585 break 36586 } 36587 v.reset(OpAMD64MOVLconst) 36588 v.AuxInt = 1 36589 return true 36590 } 36591 // match: (SETNE (FlagLT_UGT)) 36592 // cond: 36593 // result: (MOVLconst [1]) 36594 for { 36595 v_0 := v.Args[0] 36596 if v_0.Op != OpAMD64FlagLT_UGT { 36597 break 36598 } 36599 v.reset(OpAMD64MOVLconst) 36600 v.AuxInt = 1 36601 return true 36602 } 36603 // match: (SETNE (FlagGT_ULT)) 36604 // cond: 36605 // result: (MOVLconst [1]) 36606 for { 36607 v_0 := v.Args[0] 36608 if v_0.Op != OpAMD64FlagGT_ULT { 36609 break 36610 } 36611 v.reset(OpAMD64MOVLconst) 36612 v.AuxInt = 1 36613 return true 36614 } 36615 // match: (SETNE (FlagGT_UGT)) 36616 // cond: 36617 // result: (MOVLconst [1]) 36618 for { 36619 v_0 := v.Args[0] 36620 if v_0.Op != OpAMD64FlagGT_UGT { 36621 break 36622 } 36623 v.reset(OpAMD64MOVLconst) 36624 v.AuxInt = 1 36625 return true 36626 } 36627 return false 36628 } 36629 func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { 36630 b := v.Block 36631 _ = b 
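// NOTE (editorial, not generated code): SHLL only consumes the low five
// bits of the CL count, so a constant count folds to SHLLconst [c&31]
// (and SHLQ, below, to SHLQconst [c&63]). A hypothetical source-level
// mirror of that masking:
//
//	func shllConst(x int32, c int64) int32 {
//		return x << uint(c&31) // the &31 reproduces the hardware masking
//	}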
36632 // match: (SHLL x (MOVQconst [c])) 36633 // cond: 36634 // result: (SHLLconst [c&31] x) 36635 for { 36636 _ = v.Args[1] 36637 x := v.Args[0] 36638 v_1 := v.Args[1] 36639 if v_1.Op != OpAMD64MOVQconst { 36640 break 36641 } 36642 c := v_1.AuxInt 36643 v.reset(OpAMD64SHLLconst) 36644 v.AuxInt = c & 31 36645 v.AddArg(x) 36646 return true 36647 } 36648 // match: (SHLL x (MOVLconst [c])) 36649 // cond: 36650 // result: (SHLLconst [c&31] x) 36651 for { 36652 _ = v.Args[1] 36653 x := v.Args[0] 36654 v_1 := v.Args[1] 36655 if v_1.Op != OpAMD64MOVLconst { 36656 break 36657 } 36658 c := v_1.AuxInt 36659 v.reset(OpAMD64SHLLconst) 36660 v.AuxInt = c & 31 36661 v.AddArg(x) 36662 return true 36663 } 36664 // match: (SHLL x (ADDQconst [c] y)) 36665 // cond: c & 31 == 0 36666 // result: (SHLL x y) 36667 for { 36668 _ = v.Args[1] 36669 x := v.Args[0] 36670 v_1 := v.Args[1] 36671 if v_1.Op != OpAMD64ADDQconst { 36672 break 36673 } 36674 c := v_1.AuxInt 36675 y := v_1.Args[0] 36676 if !(c&31 == 0) { 36677 break 36678 } 36679 v.reset(OpAMD64SHLL) 36680 v.AddArg(x) 36681 v.AddArg(y) 36682 return true 36683 } 36684 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y))) 36685 // cond: c & 31 == 0 36686 // result: (SHLL x (NEGQ <t> y)) 36687 for { 36688 _ = v.Args[1] 36689 x := v.Args[0] 36690 v_1 := v.Args[1] 36691 if v_1.Op != OpAMD64NEGQ { 36692 break 36693 } 36694 t := v_1.Type 36695 v_1_0 := v_1.Args[0] 36696 if v_1_0.Op != OpAMD64ADDQconst { 36697 break 36698 } 36699 c := v_1_0.AuxInt 36700 y := v_1_0.Args[0] 36701 if !(c&31 == 0) { 36702 break 36703 } 36704 v.reset(OpAMD64SHLL) 36705 v.AddArg(x) 36706 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 36707 v0.AddArg(y) 36708 v.AddArg(v0) 36709 return true 36710 } 36711 // match: (SHLL x (ANDQconst [c] y)) 36712 // cond: c & 31 == 31 36713 // result: (SHLL x y) 36714 for { 36715 _ = v.Args[1] 36716 x := v.Args[0] 36717 v_1 := v.Args[1] 36718 if v_1.Op != OpAMD64ANDQconst { 36719 break 36720 } 36721 c := v_1.AuxInt 36722 y := v_1.Args[0] 36723 if !(c&31 == 31) { 36724 break 36725 } 36726 v.reset(OpAMD64SHLL) 36727 v.AddArg(x) 36728 v.AddArg(y) 36729 return true 36730 } 36731 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y))) 36732 // cond: c & 31 == 31 36733 // result: (SHLL x (NEGQ <t> y)) 36734 for { 36735 _ = v.Args[1] 36736 x := v.Args[0] 36737 v_1 := v.Args[1] 36738 if v_1.Op != OpAMD64NEGQ { 36739 break 36740 } 36741 t := v_1.Type 36742 v_1_0 := v_1.Args[0] 36743 if v_1_0.Op != OpAMD64ANDQconst { 36744 break 36745 } 36746 c := v_1_0.AuxInt 36747 y := v_1_0.Args[0] 36748 if !(c&31 == 31) { 36749 break 36750 } 36751 v.reset(OpAMD64SHLL) 36752 v.AddArg(x) 36753 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 36754 v0.AddArg(y) 36755 v.AddArg(v0) 36756 return true 36757 } 36758 // match: (SHLL x (ADDLconst [c] y)) 36759 // cond: c & 31 == 0 36760 // result: (SHLL x y) 36761 for { 36762 _ = v.Args[1] 36763 x := v.Args[0] 36764 v_1 := v.Args[1] 36765 if v_1.Op != OpAMD64ADDLconst { 36766 break 36767 } 36768 c := v_1.AuxInt 36769 y := v_1.Args[0] 36770 if !(c&31 == 0) { 36771 break 36772 } 36773 v.reset(OpAMD64SHLL) 36774 v.AddArg(x) 36775 v.AddArg(y) 36776 return true 36777 } 36778 // match: (SHLL x (NEGL <t> (ADDLconst [c] y))) 36779 // cond: c & 31 == 0 36780 // result: (SHLL x (NEGL <t> y)) 36781 for { 36782 _ = v.Args[1] 36783 x := v.Args[0] 36784 v_1 := v.Args[1] 36785 if v_1.Op != OpAMD64NEGL { 36786 break 36787 } 36788 t := v_1.Type 36789 v_1_0 := v_1.Args[0] 36790 if v_1_0.Op != OpAMD64ADDLconst { 36791 break 36792 } 36793 c := v_1_0.AuxInt 36794 y := v_1_0.Args[0] 36795 
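// NOTE (editorial, not generated code): the NEGQ/NEGL variants apply the
// same reasoning inside a negated count: a multiple of the width added
// under the negation cannot change the masked amount, so the ADDxconst
// is dropped and the NEG is rebuilt around y alone. Hypothetically:
//
//	func negAddDrop(y uint32) bool {
//		return (-(y+32))&31 == (-y)&31 // always true
//	}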
if !(c&31 == 0) { 36796 break 36797 } 36798 v.reset(OpAMD64SHLL) 36799 v.AddArg(x) 36800 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 36801 v0.AddArg(y) 36802 v.AddArg(v0) 36803 return true 36804 } 36805 // match: (SHLL x (ANDLconst [c] y)) 36806 // cond: c & 31 == 31 36807 // result: (SHLL x y) 36808 for { 36809 _ = v.Args[1] 36810 x := v.Args[0] 36811 v_1 := v.Args[1] 36812 if v_1.Op != OpAMD64ANDLconst { 36813 break 36814 } 36815 c := v_1.AuxInt 36816 y := v_1.Args[0] 36817 if !(c&31 == 31) { 36818 break 36819 } 36820 v.reset(OpAMD64SHLL) 36821 v.AddArg(x) 36822 v.AddArg(y) 36823 return true 36824 } 36825 // match: (SHLL x (NEGL <t> (ANDLconst [c] y))) 36826 // cond: c & 31 == 31 36827 // result: (SHLL x (NEGL <t> y)) 36828 for { 36829 _ = v.Args[1] 36830 x := v.Args[0] 36831 v_1 := v.Args[1] 36832 if v_1.Op != OpAMD64NEGL { 36833 break 36834 } 36835 t := v_1.Type 36836 v_1_0 := v_1.Args[0] 36837 if v_1_0.Op != OpAMD64ANDLconst { 36838 break 36839 } 36840 c := v_1_0.AuxInt 36841 y := v_1_0.Args[0] 36842 if !(c&31 == 31) { 36843 break 36844 } 36845 v.reset(OpAMD64SHLL) 36846 v.AddArg(x) 36847 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 36848 v0.AddArg(y) 36849 v.AddArg(v0) 36850 return true 36851 } 36852 return false 36853 } 36854 func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { 36855 // match: (SHLLconst x [0]) 36856 // cond: 36857 // result: x 36858 for { 36859 if v.AuxInt != 0 { 36860 break 36861 } 36862 x := v.Args[0] 36863 v.reset(OpCopy) 36864 v.Type = x.Type 36865 v.AddArg(x) 36866 return true 36867 } 36868 return false 36869 } 36870 func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { 36871 b := v.Block 36872 _ = b 36873 // match: (SHLQ x (MOVQconst [c])) 36874 // cond: 36875 // result: (SHLQconst [c&63] x) 36876 for { 36877 _ = v.Args[1] 36878 x := v.Args[0] 36879 v_1 := v.Args[1] 36880 if v_1.Op != OpAMD64MOVQconst { 36881 break 36882 } 36883 c := v_1.AuxInt 36884 v.reset(OpAMD64SHLQconst) 36885 v.AuxInt = c & 63 36886 v.AddArg(x) 36887 return true 36888 } 36889 // match: (SHLQ x (MOVLconst [c])) 36890 // cond: 36891 // result: (SHLQconst [c&63] x) 36892 for { 36893 _ = v.Args[1] 36894 x := v.Args[0] 36895 v_1 := v.Args[1] 36896 if v_1.Op != OpAMD64MOVLconst { 36897 break 36898 } 36899 c := v_1.AuxInt 36900 v.reset(OpAMD64SHLQconst) 36901 v.AuxInt = c & 63 36902 v.AddArg(x) 36903 return true 36904 } 36905 // match: (SHLQ x (ADDQconst [c] y)) 36906 // cond: c & 63 == 0 36907 // result: (SHLQ x y) 36908 for { 36909 _ = v.Args[1] 36910 x := v.Args[0] 36911 v_1 := v.Args[1] 36912 if v_1.Op != OpAMD64ADDQconst { 36913 break 36914 } 36915 c := v_1.AuxInt 36916 y := v_1.Args[0] 36917 if !(c&63 == 0) { 36918 break 36919 } 36920 v.reset(OpAMD64SHLQ) 36921 v.AddArg(x) 36922 v.AddArg(y) 36923 return true 36924 } 36925 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y))) 36926 // cond: c & 63 == 0 36927 // result: (SHLQ x (NEGQ <t> y)) 36928 for { 36929 _ = v.Args[1] 36930 x := v.Args[0] 36931 v_1 := v.Args[1] 36932 if v_1.Op != OpAMD64NEGQ { 36933 break 36934 } 36935 t := v_1.Type 36936 v_1_0 := v_1.Args[0] 36937 if v_1_0.Op != OpAMD64ADDQconst { 36938 break 36939 } 36940 c := v_1_0.AuxInt 36941 y := v_1_0.Args[0] 36942 if !(c&63 == 0) { 36943 break 36944 } 36945 v.reset(OpAMD64SHLQ) 36946 v.AddArg(x) 36947 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 36948 v0.AddArg(y) 36949 v.AddArg(v0) 36950 return true 36951 } 36952 // match: (SHLQ x (ANDQconst [c] y)) 36953 // cond: c & 63 == 63 36954 // result: (SHLQ x y) 36955 for { 36956 _ = v.Args[1] 36957 x := v.Args[0] 36958 v_1 := v.Args[1] 36959 
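// NOTE (editorial, not generated code): an explicit AND on the count can
// be dropped when its mask keeps every bit the shift reads (c&63 == 63
// for 64-bit shifts, c&31 == 31 for 32-bit ones), because the shift
// instruction itself ignores the high count bits. A hypothetical sketch
// of the invariant the rule relies on:
//
//	func andDropOK(y, c uint64) bool {
//		if c&63 != 63 {
//			return true // the rule does not fire in this case
//		}
//		return (y&c)&63 == y&63 // the AND cannot alter the low six bits
//	}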
if v_1.Op != OpAMD64ANDQconst { 36960 break 36961 } 36962 c := v_1.AuxInt 36963 y := v_1.Args[0] 36964 if !(c&63 == 63) { 36965 break 36966 } 36967 v.reset(OpAMD64SHLQ) 36968 v.AddArg(x) 36969 v.AddArg(y) 36970 return true 36971 } 36972 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y))) 36973 // cond: c & 63 == 63 36974 // result: (SHLQ x (NEGQ <t> y)) 36975 for { 36976 _ = v.Args[1] 36977 x := v.Args[0] 36978 v_1 := v.Args[1] 36979 if v_1.Op != OpAMD64NEGQ { 36980 break 36981 } 36982 t := v_1.Type 36983 v_1_0 := v_1.Args[0] 36984 if v_1_0.Op != OpAMD64ANDQconst { 36985 break 36986 } 36987 c := v_1_0.AuxInt 36988 y := v_1_0.Args[0] 36989 if !(c&63 == 63) { 36990 break 36991 } 36992 v.reset(OpAMD64SHLQ) 36993 v.AddArg(x) 36994 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 36995 v0.AddArg(y) 36996 v.AddArg(v0) 36997 return true 36998 } 36999 // match: (SHLQ x (ADDLconst [c] y)) 37000 // cond: c & 63 == 0 37001 // result: (SHLQ x y) 37002 for { 37003 _ = v.Args[1] 37004 x := v.Args[0] 37005 v_1 := v.Args[1] 37006 if v_1.Op != OpAMD64ADDLconst { 37007 break 37008 } 37009 c := v_1.AuxInt 37010 y := v_1.Args[0] 37011 if !(c&63 == 0) { 37012 break 37013 } 37014 v.reset(OpAMD64SHLQ) 37015 v.AddArg(x) 37016 v.AddArg(y) 37017 return true 37018 } 37019 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y))) 37020 // cond: c & 63 == 0 37021 // result: (SHLQ x (NEGL <t> y)) 37022 for { 37023 _ = v.Args[1] 37024 x := v.Args[0] 37025 v_1 := v.Args[1] 37026 if v_1.Op != OpAMD64NEGL { 37027 break 37028 } 37029 t := v_1.Type 37030 v_1_0 := v_1.Args[0] 37031 if v_1_0.Op != OpAMD64ADDLconst { 37032 break 37033 } 37034 c := v_1_0.AuxInt 37035 y := v_1_0.Args[0] 37036 if !(c&63 == 0) { 37037 break 37038 } 37039 v.reset(OpAMD64SHLQ) 37040 v.AddArg(x) 37041 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 37042 v0.AddArg(y) 37043 v.AddArg(v0) 37044 return true 37045 } 37046 // match: (SHLQ x (ANDLconst [c] y)) 37047 // cond: c & 63 == 63 37048 // result: (SHLQ x y) 37049 for { 37050 _ = v.Args[1] 37051 x := v.Args[0] 37052 v_1 := v.Args[1] 37053 if v_1.Op != OpAMD64ANDLconst { 37054 break 37055 } 37056 c := v_1.AuxInt 37057 y := v_1.Args[0] 37058 if !(c&63 == 63) { 37059 break 37060 } 37061 v.reset(OpAMD64SHLQ) 37062 v.AddArg(x) 37063 v.AddArg(y) 37064 return true 37065 } 37066 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y))) 37067 // cond: c & 63 == 63 37068 // result: (SHLQ x (NEGL <t> y)) 37069 for { 37070 _ = v.Args[1] 37071 x := v.Args[0] 37072 v_1 := v.Args[1] 37073 if v_1.Op != OpAMD64NEGL { 37074 break 37075 } 37076 t := v_1.Type 37077 v_1_0 := v_1.Args[0] 37078 if v_1_0.Op != OpAMD64ANDLconst { 37079 break 37080 } 37081 c := v_1_0.AuxInt 37082 y := v_1_0.Args[0] 37083 if !(c&63 == 63) { 37084 break 37085 } 37086 v.reset(OpAMD64SHLQ) 37087 v.AddArg(x) 37088 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 37089 v0.AddArg(y) 37090 v.AddArg(v0) 37091 return true 37092 } 37093 return false 37094 } 37095 func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { 37096 // match: (SHLQconst x [0]) 37097 // cond: 37098 // result: x 37099 for { 37100 if v.AuxInt != 0 { 37101 break 37102 } 37103 x := v.Args[0] 37104 v.reset(OpCopy) 37105 v.Type = x.Type 37106 v.AddArg(x) 37107 return true 37108 } 37109 return false 37110 } 37111 func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { 37112 // match: (SHRB x (MOVQconst [c])) 37113 // cond: c&31 < 8 37114 // result: (SHRBconst [c&31] x) 37115 for { 37116 _ = v.Args[1] 37117 x := v.Args[0] 37118 v_1 := v.Args[1] 37119 if v_1.Op != OpAMD64MOVQconst { 37120 break 37121 } 37122 c := v_1.AuxInt 37123 if 
!(c&31 < 8) { 37124 break 37125 } 37126 v.reset(OpAMD64SHRBconst) 37127 v.AuxInt = c & 31 37128 v.AddArg(x) 37129 return true 37130 } 37131 // match: (SHRB x (MOVLconst [c])) 37132 // cond: c&31 < 8 37133 // result: (SHRBconst [c&31] x) 37134 for { 37135 _ = v.Args[1] 37136 x := v.Args[0] 37137 v_1 := v.Args[1] 37138 if v_1.Op != OpAMD64MOVLconst { 37139 break 37140 } 37141 c := v_1.AuxInt 37142 if !(c&31 < 8) { 37143 break 37144 } 37145 v.reset(OpAMD64SHRBconst) 37146 v.AuxInt = c & 31 37147 v.AddArg(x) 37148 return true 37149 } 37150 // match: (SHRB _ (MOVQconst [c])) 37151 // cond: c&31 >= 8 37152 // result: (MOVLconst [0]) 37153 for { 37154 _ = v.Args[1] 37155 v_1 := v.Args[1] 37156 if v_1.Op != OpAMD64MOVQconst { 37157 break 37158 } 37159 c := v_1.AuxInt 37160 if !(c&31 >= 8) { 37161 break 37162 } 37163 v.reset(OpAMD64MOVLconst) 37164 v.AuxInt = 0 37165 return true 37166 } 37167 // match: (SHRB _ (MOVLconst [c])) 37168 // cond: c&31 >= 8 37169 // result: (MOVLconst [0]) 37170 for { 37171 _ = v.Args[1] 37172 v_1 := v.Args[1] 37173 if v_1.Op != OpAMD64MOVLconst { 37174 break 37175 } 37176 c := v_1.AuxInt 37177 if !(c&31 >= 8) { 37178 break 37179 } 37180 v.reset(OpAMD64MOVLconst) 37181 v.AuxInt = 0 37182 return true 37183 } 37184 return false 37185 } 37186 func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { 37187 // match: (SHRBconst x [0]) 37188 // cond: 37189 // result: x 37190 for { 37191 if v.AuxInt != 0 { 37192 break 37193 } 37194 x := v.Args[0] 37195 v.reset(OpCopy) 37196 v.Type = x.Type 37197 v.AddArg(x) 37198 return true 37199 } 37200 return false 37201 } 37202 func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { 37203 b := v.Block 37204 _ = b 37205 // match: (SHRL x (MOVQconst [c])) 37206 // cond: 37207 // result: (SHRLconst [c&31] x) 37208 for { 37209 _ = v.Args[1] 37210 x := v.Args[0] 37211 v_1 := v.Args[1] 37212 if v_1.Op != OpAMD64MOVQconst { 37213 break 37214 } 37215 c := v_1.AuxInt 37216 v.reset(OpAMD64SHRLconst) 37217 v.AuxInt = c & 31 37218 v.AddArg(x) 37219 return true 37220 } 37221 // match: (SHRL x (MOVLconst [c])) 37222 // cond: 37223 // result: (SHRLconst [c&31] x) 37224 for { 37225 _ = v.Args[1] 37226 x := v.Args[0] 37227 v_1 := v.Args[1] 37228 if v_1.Op != OpAMD64MOVLconst { 37229 break 37230 } 37231 c := v_1.AuxInt 37232 v.reset(OpAMD64SHRLconst) 37233 v.AuxInt = c & 31 37234 v.AddArg(x) 37235 return true 37236 } 37237 // match: (SHRL x (ADDQconst [c] y)) 37238 // cond: c & 31 == 0 37239 // result: (SHRL x y) 37240 for { 37241 _ = v.Args[1] 37242 x := v.Args[0] 37243 v_1 := v.Args[1] 37244 if v_1.Op != OpAMD64ADDQconst { 37245 break 37246 } 37247 c := v_1.AuxInt 37248 y := v_1.Args[0] 37249 if !(c&31 == 0) { 37250 break 37251 } 37252 v.reset(OpAMD64SHRL) 37253 v.AddArg(x) 37254 v.AddArg(y) 37255 return true 37256 } 37257 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y))) 37258 // cond: c & 31 == 0 37259 // result: (SHRL x (NEGQ <t> y)) 37260 for { 37261 _ = v.Args[1] 37262 x := v.Args[0] 37263 v_1 := v.Args[1] 37264 if v_1.Op != OpAMD64NEGQ { 37265 break 37266 } 37267 t := v_1.Type 37268 v_1_0 := v_1.Args[0] 37269 if v_1_0.Op != OpAMD64ADDQconst { 37270 break 37271 } 37272 c := v_1_0.AuxInt 37273 y := v_1_0.Args[0] 37274 if !(c&31 == 0) { 37275 break 37276 } 37277 v.reset(OpAMD64SHRL) 37278 v.AddArg(x) 37279 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 37280 v0.AddArg(y) 37281 v.AddArg(v0) 37282 return true 37283 } 37284 // match: (SHRL x (ANDQconst [c] y)) 37285 // cond: c & 31 == 31 37286 // result: (SHRL x y) 37287 for { 37288 _ = v.Args[1] 37289 x := 
v.Args[0] 37290 v_1 := v.Args[1] 37291 if v_1.Op != OpAMD64ANDQconst { 37292 break 37293 } 37294 c := v_1.AuxInt 37295 y := v_1.Args[0] 37296 if !(c&31 == 31) { 37297 break 37298 } 37299 v.reset(OpAMD64SHRL) 37300 v.AddArg(x) 37301 v.AddArg(y) 37302 return true 37303 } 37304 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y))) 37305 // cond: c & 31 == 31 37306 // result: (SHRL x (NEGQ <t> y)) 37307 for { 37308 _ = v.Args[1] 37309 x := v.Args[0] 37310 v_1 := v.Args[1] 37311 if v_1.Op != OpAMD64NEGQ { 37312 break 37313 } 37314 t := v_1.Type 37315 v_1_0 := v_1.Args[0] 37316 if v_1_0.Op != OpAMD64ANDQconst { 37317 break 37318 } 37319 c := v_1_0.AuxInt 37320 y := v_1_0.Args[0] 37321 if !(c&31 == 31) { 37322 break 37323 } 37324 v.reset(OpAMD64SHRL) 37325 v.AddArg(x) 37326 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 37327 v0.AddArg(y) 37328 v.AddArg(v0) 37329 return true 37330 } 37331 // match: (SHRL x (ADDLconst [c] y)) 37332 // cond: c & 31 == 0 37333 // result: (SHRL x y) 37334 for { 37335 _ = v.Args[1] 37336 x := v.Args[0] 37337 v_1 := v.Args[1] 37338 if v_1.Op != OpAMD64ADDLconst { 37339 break 37340 } 37341 c := v_1.AuxInt 37342 y := v_1.Args[0] 37343 if !(c&31 == 0) { 37344 break 37345 } 37346 v.reset(OpAMD64SHRL) 37347 v.AddArg(x) 37348 v.AddArg(y) 37349 return true 37350 } 37351 // match: (SHRL x (NEGL <t> (ADDLconst [c] y))) 37352 // cond: c & 31 == 0 37353 // result: (SHRL x (NEGL <t> y)) 37354 for { 37355 _ = v.Args[1] 37356 x := v.Args[0] 37357 v_1 := v.Args[1] 37358 if v_1.Op != OpAMD64NEGL { 37359 break 37360 } 37361 t := v_1.Type 37362 v_1_0 := v_1.Args[0] 37363 if v_1_0.Op != OpAMD64ADDLconst { 37364 break 37365 } 37366 c := v_1_0.AuxInt 37367 y := v_1_0.Args[0] 37368 if !(c&31 == 0) { 37369 break 37370 } 37371 v.reset(OpAMD64SHRL) 37372 v.AddArg(x) 37373 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 37374 v0.AddArg(y) 37375 v.AddArg(v0) 37376 return true 37377 } 37378 // match: (SHRL x (ANDLconst [c] y)) 37379 // cond: c & 31 == 31 37380 // result: (SHRL x y) 37381 for { 37382 _ = v.Args[1] 37383 x := v.Args[0] 37384 v_1 := v.Args[1] 37385 if v_1.Op != OpAMD64ANDLconst { 37386 break 37387 } 37388 c := v_1.AuxInt 37389 y := v_1.Args[0] 37390 if !(c&31 == 31) { 37391 break 37392 } 37393 v.reset(OpAMD64SHRL) 37394 v.AddArg(x) 37395 v.AddArg(y) 37396 return true 37397 } 37398 // match: (SHRL x (NEGL <t> (ANDLconst [c] y))) 37399 // cond: c & 31 == 31 37400 // result: (SHRL x (NEGL <t> y)) 37401 for { 37402 _ = v.Args[1] 37403 x := v.Args[0] 37404 v_1 := v.Args[1] 37405 if v_1.Op != OpAMD64NEGL { 37406 break 37407 } 37408 t := v_1.Type 37409 v_1_0 := v_1.Args[0] 37410 if v_1_0.Op != OpAMD64ANDLconst { 37411 break 37412 } 37413 c := v_1_0.AuxInt 37414 y := v_1_0.Args[0] 37415 if !(c&31 == 31) { 37416 break 37417 } 37418 v.reset(OpAMD64SHRL) 37419 v.AddArg(x) 37420 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 37421 v0.AddArg(y) 37422 v.AddArg(v0) 37423 return true 37424 } 37425 return false 37426 } 37427 func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { 37428 // match: (SHRLconst x [0]) 37429 // cond: 37430 // result: x 37431 for { 37432 if v.AuxInt != 0 { 37433 break 37434 } 37435 x := v.Args[0] 37436 v.reset(OpCopy) 37437 v.Type = x.Type 37438 v.AddArg(x) 37439 return true 37440 } 37441 return false 37442 } 37443 func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { 37444 b := v.Block 37445 _ = b 37446 // match: (SHRQ x (MOVQconst [c])) 37447 // cond: 37448 // result: (SHRQconst [c&63] x) 37449 for { 37450 _ = v.Args[1] 37451 x := v.Args[0] 37452 v_1 := v.Args[1] 37453 if v_1.Op != 
OpAMD64MOVQconst { 37454 break 37455 } 37456 c := v_1.AuxInt 37457 v.reset(OpAMD64SHRQconst) 37458 v.AuxInt = c & 63 37459 v.AddArg(x) 37460 return true 37461 } 37462 // match: (SHRQ x (MOVLconst [c])) 37463 // cond: 37464 // result: (SHRQconst [c&63] x) 37465 for { 37466 _ = v.Args[1] 37467 x := v.Args[0] 37468 v_1 := v.Args[1] 37469 if v_1.Op != OpAMD64MOVLconst { 37470 break 37471 } 37472 c := v_1.AuxInt 37473 v.reset(OpAMD64SHRQconst) 37474 v.AuxInt = c & 63 37475 v.AddArg(x) 37476 return true 37477 } 37478 // match: (SHRQ x (ADDQconst [c] y)) 37479 // cond: c & 63 == 0 37480 // result: (SHRQ x y) 37481 for { 37482 _ = v.Args[1] 37483 x := v.Args[0] 37484 v_1 := v.Args[1] 37485 if v_1.Op != OpAMD64ADDQconst { 37486 break 37487 } 37488 c := v_1.AuxInt 37489 y := v_1.Args[0] 37490 if !(c&63 == 0) { 37491 break 37492 } 37493 v.reset(OpAMD64SHRQ) 37494 v.AddArg(x) 37495 v.AddArg(y) 37496 return true 37497 } 37498 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y))) 37499 // cond: c & 63 == 0 37500 // result: (SHRQ x (NEGQ <t> y)) 37501 for { 37502 _ = v.Args[1] 37503 x := v.Args[0] 37504 v_1 := v.Args[1] 37505 if v_1.Op != OpAMD64NEGQ { 37506 break 37507 } 37508 t := v_1.Type 37509 v_1_0 := v_1.Args[0] 37510 if v_1_0.Op != OpAMD64ADDQconst { 37511 break 37512 } 37513 c := v_1_0.AuxInt 37514 y := v_1_0.Args[0] 37515 if !(c&63 == 0) { 37516 break 37517 } 37518 v.reset(OpAMD64SHRQ) 37519 v.AddArg(x) 37520 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 37521 v0.AddArg(y) 37522 v.AddArg(v0) 37523 return true 37524 } 37525 // match: (SHRQ x (ANDQconst [c] y)) 37526 // cond: c & 63 == 63 37527 // result: (SHRQ x y) 37528 for { 37529 _ = v.Args[1] 37530 x := v.Args[0] 37531 v_1 := v.Args[1] 37532 if v_1.Op != OpAMD64ANDQconst { 37533 break 37534 } 37535 c := v_1.AuxInt 37536 y := v_1.Args[0] 37537 if !(c&63 == 63) { 37538 break 37539 } 37540 v.reset(OpAMD64SHRQ) 37541 v.AddArg(x) 37542 v.AddArg(y) 37543 return true 37544 } 37545 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y))) 37546 // cond: c & 63 == 63 37547 // result: (SHRQ x (NEGQ <t> y)) 37548 for { 37549 _ = v.Args[1] 37550 x := v.Args[0] 37551 v_1 := v.Args[1] 37552 if v_1.Op != OpAMD64NEGQ { 37553 break 37554 } 37555 t := v_1.Type 37556 v_1_0 := v_1.Args[0] 37557 if v_1_0.Op != OpAMD64ANDQconst { 37558 break 37559 } 37560 c := v_1_0.AuxInt 37561 y := v_1_0.Args[0] 37562 if !(c&63 == 63) { 37563 break 37564 } 37565 v.reset(OpAMD64SHRQ) 37566 v.AddArg(x) 37567 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 37568 v0.AddArg(y) 37569 v.AddArg(v0) 37570 return true 37571 } 37572 // match: (SHRQ x (ADDLconst [c] y)) 37573 // cond: c & 63 == 0 37574 // result: (SHRQ x y) 37575 for { 37576 _ = v.Args[1] 37577 x := v.Args[0] 37578 v_1 := v.Args[1] 37579 if v_1.Op != OpAMD64ADDLconst { 37580 break 37581 } 37582 c := v_1.AuxInt 37583 y := v_1.Args[0] 37584 if !(c&63 == 0) { 37585 break 37586 } 37587 v.reset(OpAMD64SHRQ) 37588 v.AddArg(x) 37589 v.AddArg(y) 37590 return true 37591 } 37592 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y))) 37593 // cond: c & 63 == 0 37594 // result: (SHRQ x (NEGL <t> y)) 37595 for { 37596 _ = v.Args[1] 37597 x := v.Args[0] 37598 v_1 := v.Args[1] 37599 if v_1.Op != OpAMD64NEGL { 37600 break 37601 } 37602 t := v_1.Type 37603 v_1_0 := v_1.Args[0] 37604 if v_1_0.Op != OpAMD64ADDLconst { 37605 break 37606 } 37607 c := v_1_0.AuxInt 37608 y := v_1_0.Args[0] 37609 if !(c&63 == 0) { 37610 break 37611 } 37612 v.reset(OpAMD64SHRQ) 37613 v.AddArg(x) 37614 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 37615 v0.AddArg(y) 37616 v.AddArg(v0) 37617 return true 
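// Note: an illustrative aside, not generated code. The count-simplification
// rules in this function are sound because the machine SHRQ reads only the
// low 6 bits of its count register, i.e. it computes x >> (count & 63). Hence:
//
//	x >> ((y + k) & 63) == x >> (y & 63)  when k&63 == 0  (ADDQconst/ADDLconst rules)
//	x >> ((y & m) & 63) == x >> (y & 63)  when m&63 == 63 (ANDQconst/ANDLconst rules)
//
// The NEGQ/NEGL variants hold as well, since values congruent mod 64 remain
// congruent after negation. The SHRL rules earlier use &31 the same way.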
37618 } 37619 // match: (SHRQ x (ANDLconst [c] y)) 37620 // cond: c & 63 == 63 37621 // result: (SHRQ x y) 37622 for { 37623 _ = v.Args[1] 37624 x := v.Args[0] 37625 v_1 := v.Args[1] 37626 if v_1.Op != OpAMD64ANDLconst { 37627 break 37628 } 37629 c := v_1.AuxInt 37630 y := v_1.Args[0] 37631 if !(c&63 == 63) { 37632 break 37633 } 37634 v.reset(OpAMD64SHRQ) 37635 v.AddArg(x) 37636 v.AddArg(y) 37637 return true 37638 } 37639 // match: (SHRQ x (NEGL <t> (ANDLconst [c] y))) 37640 // cond: c & 63 == 63 37641 // result: (SHRQ x (NEGL <t> y)) 37642 for { 37643 _ = v.Args[1] 37644 x := v.Args[0] 37645 v_1 := v.Args[1] 37646 if v_1.Op != OpAMD64NEGL { 37647 break 37648 } 37649 t := v_1.Type 37650 v_1_0 := v_1.Args[0] 37651 if v_1_0.Op != OpAMD64ANDLconst { 37652 break 37653 } 37654 c := v_1_0.AuxInt 37655 y := v_1_0.Args[0] 37656 if !(c&63 == 63) { 37657 break 37658 } 37659 v.reset(OpAMD64SHRQ) 37660 v.AddArg(x) 37661 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 37662 v0.AddArg(y) 37663 v.AddArg(v0) 37664 return true 37665 } 37666 return false 37667 } 37668 func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { 37669 // match: (SHRQconst x [0]) 37670 // cond: 37671 // result: x 37672 for { 37673 if v.AuxInt != 0 { 37674 break 37675 } 37676 x := v.Args[0] 37677 v.reset(OpCopy) 37678 v.Type = x.Type 37679 v.AddArg(x) 37680 return true 37681 } 37682 return false 37683 } 37684 func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { 37685 // match: (SHRW x (MOVQconst [c])) 37686 // cond: c&31 < 16 37687 // result: (SHRWconst [c&31] x) 37688 for { 37689 _ = v.Args[1] 37690 x := v.Args[0] 37691 v_1 := v.Args[1] 37692 if v_1.Op != OpAMD64MOVQconst { 37693 break 37694 } 37695 c := v_1.AuxInt 37696 if !(c&31 < 16) { 37697 break 37698 } 37699 v.reset(OpAMD64SHRWconst) 37700 v.AuxInt = c & 31 37701 v.AddArg(x) 37702 return true 37703 } 37704 // match: (SHRW x (MOVLconst [c])) 37705 // cond: c&31 < 16 37706 // result: (SHRWconst [c&31] x) 37707 for { 37708 _ = v.Args[1] 37709 x := v.Args[0] 37710 v_1 := v.Args[1] 37711 if v_1.Op != OpAMD64MOVLconst { 37712 break 37713 } 37714 c := v_1.AuxInt 37715 if !(c&31 < 16) { 37716 break 37717 } 37718 v.reset(OpAMD64SHRWconst) 37719 v.AuxInt = c & 31 37720 v.AddArg(x) 37721 return true 37722 } 37723 // match: (SHRW _ (MOVQconst [c])) 37724 // cond: c&31 >= 16 37725 // result: (MOVLconst [0]) 37726 for { 37727 _ = v.Args[1] 37728 v_1 := v.Args[1] 37729 if v_1.Op != OpAMD64MOVQconst { 37730 break 37731 } 37732 c := v_1.AuxInt 37733 if !(c&31 >= 16) { 37734 break 37735 } 37736 v.reset(OpAMD64MOVLconst) 37737 v.AuxInt = 0 37738 return true 37739 } 37740 // match: (SHRW _ (MOVLconst [c])) 37741 // cond: c&31 >= 16 37742 // result: (MOVLconst [0]) 37743 for { 37744 _ = v.Args[1] 37745 v_1 := v.Args[1] 37746 if v_1.Op != OpAMD64MOVLconst { 37747 break 37748 } 37749 c := v_1.AuxInt 37750 if !(c&31 >= 16) { 37751 break 37752 } 37753 v.reset(OpAMD64MOVLconst) 37754 v.AuxInt = 0 37755 return true 37756 } 37757 return false 37758 } 37759 func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { 37760 // match: (SHRWconst x [0]) 37761 // cond: 37762 // result: x 37763 for { 37764 if v.AuxInt != 0 { 37765 break 37766 } 37767 x := v.Args[0] 37768 v.reset(OpCopy) 37769 v.Type = x.Type 37770 v.AddArg(x) 37771 return true 37772 } 37773 return false 37774 } 37775 func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { 37776 b := v.Block 37777 _ = b 37778 // match: (SUBL x (MOVLconst [c])) 37779 // cond: 37780 // result: (SUBLconst x [c]) 37781 for { 37782 _ = v.Args[1] 37783 x := v.Args[0] 
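// Note: an illustrative aside, not generated code. This rule folds a
// constant subtrahend into the immediate form, and the reversed-operand
// rule below uses the identity c - x == -(x - c) so a single
// immediate-form instruction plus a negate suffices:
//
//	(SUBL x (MOVLconst [c]))  =>  (SUBLconst x [c])
//	(SUBL (MOVLconst [c]) x)  =>  (NEGL (SUBLconst <v.Type> x [c]))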
37784 v_1 := v.Args[1] 37785 if v_1.Op != OpAMD64MOVLconst { 37786 break 37787 } 37788 c := v_1.AuxInt 37789 v.reset(OpAMD64SUBLconst) 37790 v.AuxInt = c 37791 v.AddArg(x) 37792 return true 37793 } 37794 // match: (SUBL (MOVLconst [c]) x) 37795 // cond: 37796 // result: (NEGL (SUBLconst <v.Type> x [c])) 37797 for { 37798 _ = v.Args[1] 37799 v_0 := v.Args[0] 37800 if v_0.Op != OpAMD64MOVLconst { 37801 break 37802 } 37803 c := v_0.AuxInt 37804 x := v.Args[1] 37805 v.reset(OpAMD64NEGL) 37806 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 37807 v0.AuxInt = c 37808 v0.AddArg(x) 37809 v.AddArg(v0) 37810 return true 37811 } 37812 // match: (SUBL x x) 37813 // cond: 37814 // result: (MOVLconst [0]) 37815 for { 37816 _ = v.Args[1] 37817 x := v.Args[0] 37818 if x != v.Args[1] { 37819 break 37820 } 37821 v.reset(OpAMD64MOVLconst) 37822 v.AuxInt = 0 37823 return true 37824 } 37825 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) 37826 // cond: canMergeLoad(v, l, x) && clobber(l) 37827 // result: (SUBLmem x [off] {sym} ptr mem) 37828 for { 37829 _ = v.Args[1] 37830 x := v.Args[0] 37831 l := v.Args[1] 37832 if l.Op != OpAMD64MOVLload { 37833 break 37834 } 37835 off := l.AuxInt 37836 sym := l.Aux 37837 _ = l.Args[1] 37838 ptr := l.Args[0] 37839 mem := l.Args[1] 37840 if !(canMergeLoad(v, l, x) && clobber(l)) { 37841 break 37842 } 37843 v.reset(OpAMD64SUBLmem) 37844 v.AuxInt = off 37845 v.Aux = sym 37846 v.AddArg(x) 37847 v.AddArg(ptr) 37848 v.AddArg(mem) 37849 return true 37850 } 37851 return false 37852 } 37853 func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { 37854 // match: (SUBLconst [c] x) 37855 // cond: int32(c) == 0 37856 // result: x 37857 for { 37858 c := v.AuxInt 37859 x := v.Args[0] 37860 if !(int32(c) == 0) { 37861 break 37862 } 37863 v.reset(OpCopy) 37864 v.Type = x.Type 37865 v.AddArg(x) 37866 return true 37867 } 37868 // match: (SUBLconst [c] x) 37869 // cond: 37870 // result: (ADDLconst [int64(int32(-c))] x) 37871 for { 37872 c := v.AuxInt 37873 x := v.Args[0] 37874 v.reset(OpAMD64ADDLconst) 37875 v.AuxInt = int64(int32(-c)) 37876 v.AddArg(x) 37877 return true 37878 } 37879 } 37880 func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool { 37881 b := v.Block 37882 _ = b 37883 typ := &b.Func.Config.Types 37884 _ = typ 37885 // match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 37886 // cond: 37887 // result: (SUBL x (MOVLf2i y)) 37888 for { 37889 off := v.AuxInt 37890 sym := v.Aux 37891 _ = v.Args[2] 37892 x := v.Args[0] 37893 ptr := v.Args[1] 37894 v_2 := v.Args[2] 37895 if v_2.Op != OpAMD64MOVSSstore { 37896 break 37897 } 37898 if v_2.AuxInt != off { 37899 break 37900 } 37901 if v_2.Aux != sym { 37902 break 37903 } 37904 _ = v_2.Args[2] 37905 if ptr != v_2.Args[0] { 37906 break 37907 } 37908 y := v_2.Args[1] 37909 v.reset(OpAMD64SUBL) 37910 v.AddArg(x) 37911 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 37912 v0.AddArg(y) 37913 v.AddArg(v0) 37914 return true 37915 } 37916 return false 37917 } 37918 func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { 37919 b := v.Block 37920 _ = b 37921 // match: (SUBQ x (MOVQconst [c])) 37922 // cond: is32Bit(c) 37923 // result: (SUBQconst x [c]) 37924 for { 37925 _ = v.Args[1] 37926 x := v.Args[0] 37927 v_1 := v.Args[1] 37928 if v_1.Op != OpAMD64MOVQconst { 37929 break 37930 } 37931 c := v_1.AuxInt 37932 if !(is32Bit(c)) { 37933 break 37934 } 37935 v.reset(OpAMD64SUBQconst) 37936 v.AuxInt = c 37937 v.AddArg(x) 37938 return true 37939 } 37940 // match: (SUBQ (MOVQconst [c]) x) 37941 // cond: is32Bit(c) 37942 // 
result: (NEGQ (SUBQconst <v.Type> x [c])) 37943 for { 37944 _ = v.Args[1] 37945 v_0 := v.Args[0] 37946 if v_0.Op != OpAMD64MOVQconst { 37947 break 37948 } 37949 c := v_0.AuxInt 37950 x := v.Args[1] 37951 if !(is32Bit(c)) { 37952 break 37953 } 37954 v.reset(OpAMD64NEGQ) 37955 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 37956 v0.AuxInt = c 37957 v0.AddArg(x) 37958 v.AddArg(v0) 37959 return true 37960 } 37961 // match: (SUBQ x x) 37962 // cond: 37963 // result: (MOVQconst [0]) 37964 for { 37965 _ = v.Args[1] 37966 x := v.Args[0] 37967 if x != v.Args[1] { 37968 break 37969 } 37970 v.reset(OpAMD64MOVQconst) 37971 v.AuxInt = 0 37972 return true 37973 } 37974 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) 37975 // cond: canMergeLoad(v, l, x) && clobber(l) 37976 // result: (SUBQmem x [off] {sym} ptr mem) 37977 for { 37978 _ = v.Args[1] 37979 x := v.Args[0] 37980 l := v.Args[1] 37981 if l.Op != OpAMD64MOVQload { 37982 break 37983 } 37984 off := l.AuxInt 37985 sym := l.Aux 37986 _ = l.Args[1] 37987 ptr := l.Args[0] 37988 mem := l.Args[1] 37989 if !(canMergeLoad(v, l, x) && clobber(l)) { 37990 break 37991 } 37992 v.reset(OpAMD64SUBQmem) 37993 v.AuxInt = off 37994 v.Aux = sym 37995 v.AddArg(x) 37996 v.AddArg(ptr) 37997 v.AddArg(mem) 37998 return true 37999 } 38000 return false 38001 } 38002 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { 38003 // match: (SUBQconst [0] x) 38004 // cond: 38005 // result: x 38006 for { 38007 if v.AuxInt != 0 { 38008 break 38009 } 38010 x := v.Args[0] 38011 v.reset(OpCopy) 38012 v.Type = x.Type 38013 v.AddArg(x) 38014 return true 38015 } 38016 // match: (SUBQconst [c] x) 38017 // cond: c != -(1<<31) 38018 // result: (ADDQconst [-c] x) 38019 for { 38020 c := v.AuxInt 38021 x := v.Args[0] 38022 if !(c != -(1 << 31)) { 38023 break 38024 } 38025 v.reset(OpAMD64ADDQconst) 38026 v.AuxInt = -c 38027 v.AddArg(x) 38028 return true 38029 } 38030 // match: (SUBQconst (MOVQconst [d]) [c]) 38031 // cond: 38032 // result: (MOVQconst [d-c]) 38033 for { 38034 c := v.AuxInt 38035 v_0 := v.Args[0] 38036 if v_0.Op != OpAMD64MOVQconst { 38037 break 38038 } 38039 d := v_0.AuxInt 38040 v.reset(OpAMD64MOVQconst) 38041 v.AuxInt = d - c 38042 return true 38043 } 38044 // match: (SUBQconst (SUBQconst x [d]) [c]) 38045 // cond: is32Bit(-c-d) 38046 // result: (ADDQconst [-c-d] x) 38047 for { 38048 c := v.AuxInt 38049 v_0 := v.Args[0] 38050 if v_0.Op != OpAMD64SUBQconst { 38051 break 38052 } 38053 d := v_0.AuxInt 38054 x := v_0.Args[0] 38055 if !(is32Bit(-c - d)) { 38056 break 38057 } 38058 v.reset(OpAMD64ADDQconst) 38059 v.AuxInt = -c - d 38060 v.AddArg(x) 38061 return true 38062 } 38063 return false 38064 } 38065 func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool { 38066 b := v.Block 38067 _ = b 38068 typ := &b.Func.Config.Types 38069 _ = typ 38070 // match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 38071 // cond: 38072 // result: (SUBQ x (MOVQf2i y)) 38073 for { 38074 off := v.AuxInt 38075 sym := v.Aux 38076 _ = v.Args[2] 38077 x := v.Args[0] 38078 ptr := v.Args[1] 38079 v_2 := v.Args[2] 38080 if v_2.Op != OpAMD64MOVSDstore { 38081 break 38082 } 38083 if v_2.AuxInt != off { 38084 break 38085 } 38086 if v_2.Aux != sym { 38087 break 38088 } 38089 _ = v_2.Args[2] 38090 if ptr != v_2.Args[0] { 38091 break 38092 } 38093 y := v_2.Args[1] 38094 v.reset(OpAMD64SUBQ) 38095 v.AddArg(x) 38096 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 38097 v0.AddArg(y) 38098 v.AddArg(v0) 38099 return true 38100 } 38101 return false 38102 } 38103 func 
rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { 38104 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) 38105 // cond: canMergeLoad(v, l, x) && clobber(l) 38106 // result: (SUBSDmem x [off] {sym} ptr mem) 38107 for { 38108 _ = v.Args[1] 38109 x := v.Args[0] 38110 l := v.Args[1] 38111 if l.Op != OpAMD64MOVSDload { 38112 break 38113 } 38114 off := l.AuxInt 38115 sym := l.Aux 38116 _ = l.Args[1] 38117 ptr := l.Args[0] 38118 mem := l.Args[1] 38119 if !(canMergeLoad(v, l, x) && clobber(l)) { 38120 break 38121 } 38122 v.reset(OpAMD64SUBSDmem) 38123 v.AuxInt = off 38124 v.Aux = sym 38125 v.AddArg(x) 38126 v.AddArg(ptr) 38127 v.AddArg(mem) 38128 return true 38129 } 38130 return false 38131 } 38132 func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool { 38133 b := v.Block 38134 _ = b 38135 typ := &b.Func.Config.Types 38136 _ = typ 38137 // match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 38138 // cond: 38139 // result: (SUBSD x (MOVQi2f y)) 38140 for { 38141 off := v.AuxInt 38142 sym := v.Aux 38143 _ = v.Args[2] 38144 x := v.Args[0] 38145 ptr := v.Args[1] 38146 v_2 := v.Args[2] 38147 if v_2.Op != OpAMD64MOVQstore { 38148 break 38149 } 38150 if v_2.AuxInt != off { 38151 break 38152 } 38153 if v_2.Aux != sym { 38154 break 38155 } 38156 _ = v_2.Args[2] 38157 if ptr != v_2.Args[0] { 38158 break 38159 } 38160 y := v_2.Args[1] 38161 v.reset(OpAMD64SUBSD) 38162 v.AddArg(x) 38163 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 38164 v0.AddArg(y) 38165 v.AddArg(v0) 38166 return true 38167 } 38168 return false 38169 } 38170 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { 38171 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) 38172 // cond: canMergeLoad(v, l, x) && clobber(l) 38173 // result: (SUBSSmem x [off] {sym} ptr mem) 38174 for { 38175 _ = v.Args[1] 38176 x := v.Args[0] 38177 l := v.Args[1] 38178 if l.Op != OpAMD64MOVSSload { 38179 break 38180 } 38181 off := l.AuxInt 38182 sym := l.Aux 38183 _ = l.Args[1] 38184 ptr := l.Args[0] 38185 mem := l.Args[1] 38186 if !(canMergeLoad(v, l, x) && clobber(l)) { 38187 break 38188 } 38189 v.reset(OpAMD64SUBSSmem) 38190 v.AuxInt = off 38191 v.Aux = sym 38192 v.AddArg(x) 38193 v.AddArg(ptr) 38194 v.AddArg(mem) 38195 return true 38196 } 38197 return false 38198 } 38199 func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool { 38200 b := v.Block 38201 _ = b 38202 typ := &b.Func.Config.Types 38203 _ = typ 38204 // match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 38205 // cond: 38206 // result: (SUBSS x (MOVLi2f y)) 38207 for { 38208 off := v.AuxInt 38209 sym := v.Aux 38210 _ = v.Args[2] 38211 x := v.Args[0] 38212 ptr := v.Args[1] 38213 v_2 := v.Args[2] 38214 if v_2.Op != OpAMD64MOVLstore { 38215 break 38216 } 38217 if v_2.AuxInt != off { 38218 break 38219 } 38220 if v_2.Aux != sym { 38221 break 38222 } 38223 _ = v_2.Args[2] 38224 if ptr != v_2.Args[0] { 38225 break 38226 } 38227 y := v_2.Args[1] 38228 v.reset(OpAMD64SUBSS) 38229 v.AddArg(x) 38230 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 38231 v0.AddArg(y) 38232 v.AddArg(v0) 38233 return true 38234 } 38235 return false 38236 } 38237 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { 38238 // match: (TESTB (MOVLconst [c]) x) 38239 // cond: 38240 // result: (TESTBconst [c] x) 38241 for { 38242 _ = v.Args[1] 38243 v_0 := v.Args[0] 38244 if v_0.Op != OpAMD64MOVLconst { 38245 break 38246 } 38247 c := v_0.AuxInt 38248 x := v.Args[1] 38249 v.reset(OpAMD64TESTBconst) 38250 v.AuxInt = c 38251 v.AddArg(x) 38252 return true 38253 } 38254 // match: 
(TESTB x (MOVLconst [c])) 38255 // cond: 38256 // result: (TESTBconst [c] x) 38257 for { 38258 _ = v.Args[1] 38259 x := v.Args[0] 38260 v_1 := v.Args[1] 38261 if v_1.Op != OpAMD64MOVLconst { 38262 break 38263 } 38264 c := v_1.AuxInt 38265 v.reset(OpAMD64TESTBconst) 38266 v.AuxInt = c 38267 v.AddArg(x) 38268 return true 38269 } 38270 return false 38271 } 38272 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { 38273 // match: (TESTL (MOVLconst [c]) x) 38274 // cond: 38275 // result: (TESTLconst [c] x) 38276 for { 38277 _ = v.Args[1] 38278 v_0 := v.Args[0] 38279 if v_0.Op != OpAMD64MOVLconst { 38280 break 38281 } 38282 c := v_0.AuxInt 38283 x := v.Args[1] 38284 v.reset(OpAMD64TESTLconst) 38285 v.AuxInt = c 38286 v.AddArg(x) 38287 return true 38288 } 38289 // match: (TESTL x (MOVLconst [c])) 38290 // cond: 38291 // result: (TESTLconst [c] x) 38292 for { 38293 _ = v.Args[1] 38294 x := v.Args[0] 38295 v_1 := v.Args[1] 38296 if v_1.Op != OpAMD64MOVLconst { 38297 break 38298 } 38299 c := v_1.AuxInt 38300 v.reset(OpAMD64TESTLconst) 38301 v.AuxInt = c 38302 v.AddArg(x) 38303 return true 38304 } 38305 return false 38306 } 38307 func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { 38308 // match: (TESTQ (MOVQconst [c]) x) 38309 // cond: is32Bit(c) 38310 // result: (TESTQconst [c] x) 38311 for { 38312 _ = v.Args[1] 38313 v_0 := v.Args[0] 38314 if v_0.Op != OpAMD64MOVQconst { 38315 break 38316 } 38317 c := v_0.AuxInt 38318 x := v.Args[1] 38319 if !(is32Bit(c)) { 38320 break 38321 } 38322 v.reset(OpAMD64TESTQconst) 38323 v.AuxInt = c 38324 v.AddArg(x) 38325 return true 38326 } 38327 // match: (TESTQ x (MOVQconst [c])) 38328 // cond: is32Bit(c) 38329 // result: (TESTQconst [c] x) 38330 for { 38331 _ = v.Args[1] 38332 x := v.Args[0] 38333 v_1 := v.Args[1] 38334 if v_1.Op != OpAMD64MOVQconst { 38335 break 38336 } 38337 c := v_1.AuxInt 38338 if !(is32Bit(c)) { 38339 break 38340 } 38341 v.reset(OpAMD64TESTQconst) 38342 v.AuxInt = c 38343 v.AddArg(x) 38344 return true 38345 } 38346 return false 38347 } 38348 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { 38349 // match: (TESTW (MOVLconst [c]) x) 38350 // cond: 38351 // result: (TESTWconst [c] x) 38352 for { 38353 _ = v.Args[1] 38354 v_0 := v.Args[0] 38355 if v_0.Op != OpAMD64MOVLconst { 38356 break 38357 } 38358 c := v_0.AuxInt 38359 x := v.Args[1] 38360 v.reset(OpAMD64TESTWconst) 38361 v.AuxInt = c 38362 v.AddArg(x) 38363 return true 38364 } 38365 // match: (TESTW x (MOVLconst [c])) 38366 // cond: 38367 // result: (TESTWconst [c] x) 38368 for { 38369 _ = v.Args[1] 38370 x := v.Args[0] 38371 v_1 := v.Args[1] 38372 if v_1.Op != OpAMD64MOVLconst { 38373 break 38374 } 38375 c := v_1.AuxInt 38376 v.reset(OpAMD64TESTWconst) 38377 v.AuxInt = c 38378 v.AddArg(x) 38379 return true 38380 } 38381 return false 38382 } 38383 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { 38384 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 38385 // cond: is32Bit(off1+off2) 38386 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 38387 for { 38388 off1 := v.AuxInt 38389 sym := v.Aux 38390 _ = v.Args[2] 38391 val := v.Args[0] 38392 v_1 := v.Args[1] 38393 if v_1.Op != OpAMD64ADDQconst { 38394 break 38395 } 38396 off2 := v_1.AuxInt 38397 ptr := v_1.Args[0] 38398 mem := v.Args[2] 38399 if !(is32Bit(off1 + off2)) { 38400 break 38401 } 38402 v.reset(OpAMD64XADDLlock) 38403 v.AuxInt = off1 + off2 38404 v.Aux = sym 38405 v.AddArg(val) 38406 v.AddArg(ptr) 38407 v.AddArg(mem) 38408 return true 38409 } 38410 return false 38411 } 38412 func 
rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { 38413 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 38414 // cond: is32Bit(off1+off2) 38415 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 38416 for { 38417 off1 := v.AuxInt 38418 sym := v.Aux 38419 _ = v.Args[2] 38420 val := v.Args[0] 38421 v_1 := v.Args[1] 38422 if v_1.Op != OpAMD64ADDQconst { 38423 break 38424 } 38425 off2 := v_1.AuxInt 38426 ptr := v_1.Args[0] 38427 mem := v.Args[2] 38428 if !(is32Bit(off1 + off2)) { 38429 break 38430 } 38431 v.reset(OpAMD64XADDQlock) 38432 v.AuxInt = off1 + off2 38433 v.Aux = sym 38434 v.AddArg(val) 38435 v.AddArg(ptr) 38436 v.AddArg(mem) 38437 return true 38438 } 38439 return false 38440 } 38441 func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { 38442 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 38443 // cond: is32Bit(off1+off2) 38444 // result: (XCHGL [off1+off2] {sym} val ptr mem) 38445 for { 38446 off1 := v.AuxInt 38447 sym := v.Aux 38448 _ = v.Args[2] 38449 val := v.Args[0] 38450 v_1 := v.Args[1] 38451 if v_1.Op != OpAMD64ADDQconst { 38452 break 38453 } 38454 off2 := v_1.AuxInt 38455 ptr := v_1.Args[0] 38456 mem := v.Args[2] 38457 if !(is32Bit(off1 + off2)) { 38458 break 38459 } 38460 v.reset(OpAMD64XCHGL) 38461 v.AuxInt = off1 + off2 38462 v.Aux = sym 38463 v.AddArg(val) 38464 v.AddArg(ptr) 38465 v.AddArg(mem) 38466 return true 38467 } 38468 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 38469 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 38470 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 38471 for { 38472 off1 := v.AuxInt 38473 sym1 := v.Aux 38474 _ = v.Args[2] 38475 val := v.Args[0] 38476 v_1 := v.Args[1] 38477 if v_1.Op != OpAMD64LEAQ { 38478 break 38479 } 38480 off2 := v_1.AuxInt 38481 sym2 := v_1.Aux 38482 ptr := v_1.Args[0] 38483 mem := v.Args[2] 38484 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 38485 break 38486 } 38487 v.reset(OpAMD64XCHGL) 38488 v.AuxInt = off1 + off2 38489 v.Aux = mergeSym(sym1, sym2) 38490 v.AddArg(val) 38491 v.AddArg(ptr) 38492 v.AddArg(mem) 38493 return true 38494 } 38495 return false 38496 } 38497 func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { 38498 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 38499 // cond: is32Bit(off1+off2) 38500 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 38501 for { 38502 off1 := v.AuxInt 38503 sym := v.Aux 38504 _ = v.Args[2] 38505 val := v.Args[0] 38506 v_1 := v.Args[1] 38507 if v_1.Op != OpAMD64ADDQconst { 38508 break 38509 } 38510 off2 := v_1.AuxInt 38511 ptr := v_1.Args[0] 38512 mem := v.Args[2] 38513 if !(is32Bit(off1 + off2)) { 38514 break 38515 } 38516 v.reset(OpAMD64XCHGQ) 38517 v.AuxInt = off1 + off2 38518 v.Aux = sym 38519 v.AddArg(val) 38520 v.AddArg(ptr) 38521 v.AddArg(mem) 38522 return true 38523 } 38524 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 38525 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 38526 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 38527 for { 38528 off1 := v.AuxInt 38529 sym1 := v.Aux 38530 _ = v.Args[2] 38531 val := v.Args[0] 38532 v_1 := v.Args[1] 38533 if v_1.Op != OpAMD64LEAQ { 38534 break 38535 } 38536 off2 := v_1.AuxInt 38537 sym2 := v_1.Aux 38538 ptr := v_1.Args[0] 38539 mem := v.Args[2] 38540 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 38541 break 38542 } 38543 v.reset(OpAMD64XCHGQ) 38544 v.AuxInt = off1 + off2 38545 v.Aux = mergeSym(sym1, 
sym2) 38546 v.AddArg(val) 38547 v.AddArg(ptr) 38548 v.AddArg(mem) 38549 return true 38550 } 38551 return false 38552 } 38553 func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { 38554 // match: (XORL x (MOVLconst [c])) 38555 // cond: 38556 // result: (XORLconst [c] x) 38557 for { 38558 _ = v.Args[1] 38559 x := v.Args[0] 38560 v_1 := v.Args[1] 38561 if v_1.Op != OpAMD64MOVLconst { 38562 break 38563 } 38564 c := v_1.AuxInt 38565 v.reset(OpAMD64XORLconst) 38566 v.AuxInt = c 38567 v.AddArg(x) 38568 return true 38569 } 38570 // match: (XORL (MOVLconst [c]) x) 38571 // cond: 38572 // result: (XORLconst [c] x) 38573 for { 38574 _ = v.Args[1] 38575 v_0 := v.Args[0] 38576 if v_0.Op != OpAMD64MOVLconst { 38577 break 38578 } 38579 c := v_0.AuxInt 38580 x := v.Args[1] 38581 v.reset(OpAMD64XORLconst) 38582 v.AuxInt = c 38583 v.AddArg(x) 38584 return true 38585 } 38586 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) 38587 // cond: d==32-c 38588 // result: (ROLLconst x [c]) 38589 for { 38590 _ = v.Args[1] 38591 v_0 := v.Args[0] 38592 if v_0.Op != OpAMD64SHLLconst { 38593 break 38594 } 38595 c := v_0.AuxInt 38596 x := v_0.Args[0] 38597 v_1 := v.Args[1] 38598 if v_1.Op != OpAMD64SHRLconst { 38599 break 38600 } 38601 d := v_1.AuxInt 38602 if x != v_1.Args[0] { 38603 break 38604 } 38605 if !(d == 32-c) { 38606 break 38607 } 38608 v.reset(OpAMD64ROLLconst) 38609 v.AuxInt = c 38610 v.AddArg(x) 38611 return true 38612 } 38613 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) 38614 // cond: d==32-c 38615 // result: (ROLLconst x [c]) 38616 for { 38617 _ = v.Args[1] 38618 v_0 := v.Args[0] 38619 if v_0.Op != OpAMD64SHRLconst { 38620 break 38621 } 38622 d := v_0.AuxInt 38623 x := v_0.Args[0] 38624 v_1 := v.Args[1] 38625 if v_1.Op != OpAMD64SHLLconst { 38626 break 38627 } 38628 c := v_1.AuxInt 38629 if x != v_1.Args[0] { 38630 break 38631 } 38632 if !(d == 32-c) { 38633 break 38634 } 38635 v.reset(OpAMD64ROLLconst) 38636 v.AuxInt = c 38637 v.AddArg(x) 38638 return true 38639 } 38640 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 38641 // cond: d==16-c && c < 16 && t.Size() == 2 38642 // result: (ROLWconst x [c]) 38643 for { 38644 t := v.Type 38645 _ = v.Args[1] 38646 v_0 := v.Args[0] 38647 if v_0.Op != OpAMD64SHLLconst { 38648 break 38649 } 38650 c := v_0.AuxInt 38651 x := v_0.Args[0] 38652 v_1 := v.Args[1] 38653 if v_1.Op != OpAMD64SHRWconst { 38654 break 38655 } 38656 d := v_1.AuxInt 38657 if x != v_1.Args[0] { 38658 break 38659 } 38660 if !(d == 16-c && c < 16 && t.Size() == 2) { 38661 break 38662 } 38663 v.reset(OpAMD64ROLWconst) 38664 v.AuxInt = c 38665 v.AddArg(x) 38666 return true 38667 } 38668 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 38669 // cond: d==16-c && c < 16 && t.Size() == 2 38670 // result: (ROLWconst x [c]) 38671 for { 38672 t := v.Type 38673 _ = v.Args[1] 38674 v_0 := v.Args[0] 38675 if v_0.Op != OpAMD64SHRWconst { 38676 break 38677 } 38678 d := v_0.AuxInt 38679 x := v_0.Args[0] 38680 v_1 := v.Args[1] 38681 if v_1.Op != OpAMD64SHLLconst { 38682 break 38683 } 38684 c := v_1.AuxInt 38685 if x != v_1.Args[0] { 38686 break 38687 } 38688 if !(d == 16-c && c < 16 && t.Size() == 2) { 38689 break 38690 } 38691 v.reset(OpAMD64ROLWconst) 38692 v.AuxInt = c 38693 v.AddArg(x) 38694 return true 38695 } 38696 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 38697 // cond: d==8-c && c < 8 && t.Size() == 1 38698 // result: (ROLBconst x [c]) 38699 for { 38700 t := v.Type 38701 _ = v.Args[1] 38702 v_0 := v.Args[0] 38703 if v_0.Op != OpAMD64SHLLconst { 38704 break 38705 } 
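// Note: an illustrative aside, not generated code. The rotate-recognition
// rules in this function work because the two shifted bit ranges are
// disjoint, so XOR agrees with OR and (x<<c) ^ (x>>(w-c)) rotates a w-bit
// value left by c. The 16- and 8-bit forms additionally require c < w and a
// matching t.Size(), since the operands live in 32-bit registers. A sketch
// of the w == 8 identity this rule targets:
//
//	var x uint8
//	const c = 3
//	_ = x<<c ^ x>>(8-c) // == bits.RotateLeft8(x, c) from math/bits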
38706 c := v_0.AuxInt 38707 x := v_0.Args[0] 38708 v_1 := v.Args[1] 38709 if v_1.Op != OpAMD64SHRBconst { 38710 break 38711 } 38712 d := v_1.AuxInt 38713 if x != v_1.Args[0] { 38714 break 38715 } 38716 if !(d == 8-c && c < 8 && t.Size() == 1) { 38717 break 38718 } 38719 v.reset(OpAMD64ROLBconst) 38720 v.AuxInt = c 38721 v.AddArg(x) 38722 return true 38723 } 38724 // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 38725 // cond: d==8-c && c < 8 && t.Size() == 1 38726 // result: (ROLBconst x [c]) 38727 for { 38728 t := v.Type 38729 _ = v.Args[1] 38730 v_0 := v.Args[0] 38731 if v_0.Op != OpAMD64SHRBconst { 38732 break 38733 } 38734 d := v_0.AuxInt 38735 x := v_0.Args[0] 38736 v_1 := v.Args[1] 38737 if v_1.Op != OpAMD64SHLLconst { 38738 break 38739 } 38740 c := v_1.AuxInt 38741 if x != v_1.Args[0] { 38742 break 38743 } 38744 if !(d == 8-c && c < 8 && t.Size() == 1) { 38745 break 38746 } 38747 v.reset(OpAMD64ROLBconst) 38748 v.AuxInt = c 38749 v.AddArg(x) 38750 return true 38751 } 38752 // match: (XORL x x) 38753 // cond: 38754 // result: (MOVLconst [0]) 38755 for { 38756 _ = v.Args[1] 38757 x := v.Args[0] 38758 if x != v.Args[1] { 38759 break 38760 } 38761 v.reset(OpAMD64MOVLconst) 38762 v.AuxInt = 0 38763 return true 38764 } 38765 // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) 38766 // cond: canMergeLoad(v, l, x) && clobber(l) 38767 // result: (XORLmem x [off] {sym} ptr mem) 38768 for { 38769 _ = v.Args[1] 38770 x := v.Args[0] 38771 l := v.Args[1] 38772 if l.Op != OpAMD64MOVLload { 38773 break 38774 } 38775 off := l.AuxInt 38776 sym := l.Aux 38777 _ = l.Args[1] 38778 ptr := l.Args[0] 38779 mem := l.Args[1] 38780 if !(canMergeLoad(v, l, x) && clobber(l)) { 38781 break 38782 } 38783 v.reset(OpAMD64XORLmem) 38784 v.AuxInt = off 38785 v.Aux = sym 38786 v.AddArg(x) 38787 v.AddArg(ptr) 38788 v.AddArg(mem) 38789 return true 38790 } 38791 return false 38792 } 38793 func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { 38794 // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) 38795 // cond: canMergeLoad(v, l, x) && clobber(l) 38796 // result: (XORLmem x [off] {sym} ptr mem) 38797 for { 38798 _ = v.Args[1] 38799 l := v.Args[0] 38800 if l.Op != OpAMD64MOVLload { 38801 break 38802 } 38803 off := l.AuxInt 38804 sym := l.Aux 38805 _ = l.Args[1] 38806 ptr := l.Args[0] 38807 mem := l.Args[1] 38808 x := v.Args[1] 38809 if !(canMergeLoad(v, l, x) && clobber(l)) { 38810 break 38811 } 38812 v.reset(OpAMD64XORLmem) 38813 v.AuxInt = off 38814 v.Aux = sym 38815 v.AddArg(x) 38816 v.AddArg(ptr) 38817 v.AddArg(mem) 38818 return true 38819 } 38820 return false 38821 } 38822 func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { 38823 // match: (XORLconst [1] (SETNE x)) 38824 // cond: 38825 // result: (SETEQ x) 38826 for { 38827 if v.AuxInt != 1 { 38828 break 38829 } 38830 v_0 := v.Args[0] 38831 if v_0.Op != OpAMD64SETNE { 38832 break 38833 } 38834 x := v_0.Args[0] 38835 v.reset(OpAMD64SETEQ) 38836 v.AddArg(x) 38837 return true 38838 } 38839 // match: (XORLconst [1] (SETEQ x)) 38840 // cond: 38841 // result: (SETNE x) 38842 for { 38843 if v.AuxInt != 1 { 38844 break 38845 } 38846 v_0 := v.Args[0] 38847 if v_0.Op != OpAMD64SETEQ { 38848 break 38849 } 38850 x := v_0.Args[0] 38851 v.reset(OpAMD64SETNE) 38852 v.AddArg(x) 38853 return true 38854 } 38855 // match: (XORLconst [1] (SETL x)) 38856 // cond: 38857 // result: (SETGE x) 38858 for { 38859 if v.AuxInt != 1 { 38860 break 38861 } 38862 v_0 := v.Args[0] 38863 if v_0.Op != OpAMD64SETL { 38864 break 38865 } 38866 x := v_0.Args[0] 38867 v.reset(OpAMD64SETGE) 
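// Note: an illustrative aside, not generated code. SETcc materializes a
// flag as the byte 0 or 1, so XORing the result with 1 is logical negation;
// each rule in this function therefore replaces the XOR with the
// complementary condition (SETNE<->SETEQ, SETL<->SETGE, SETLE<->SETG,
// SETB<->SETAE, SETBE<->SETA):
//
//	lt := 0    // a SETL result meaning "not less"
//	_ = lt ^ 1 // == 1, exactly what SETGE produces for the same flags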
38868 v.AddArg(x) 38869 return true 38870 } 38871 // match: (XORLconst [1] (SETGE x)) 38872 // cond: 38873 // result: (SETL x) 38874 for { 38875 if v.AuxInt != 1 { 38876 break 38877 } 38878 v_0 := v.Args[0] 38879 if v_0.Op != OpAMD64SETGE { 38880 break 38881 } 38882 x := v_0.Args[0] 38883 v.reset(OpAMD64SETL) 38884 v.AddArg(x) 38885 return true 38886 } 38887 // match: (XORLconst [1] (SETLE x)) 38888 // cond: 38889 // result: (SETG x) 38890 for { 38891 if v.AuxInt != 1 { 38892 break 38893 } 38894 v_0 := v.Args[0] 38895 if v_0.Op != OpAMD64SETLE { 38896 break 38897 } 38898 x := v_0.Args[0] 38899 v.reset(OpAMD64SETG) 38900 v.AddArg(x) 38901 return true 38902 } 38903 // match: (XORLconst [1] (SETG x)) 38904 // cond: 38905 // result: (SETLE x) 38906 for { 38907 if v.AuxInt != 1 { 38908 break 38909 } 38910 v_0 := v.Args[0] 38911 if v_0.Op != OpAMD64SETG { 38912 break 38913 } 38914 x := v_0.Args[0] 38915 v.reset(OpAMD64SETLE) 38916 v.AddArg(x) 38917 return true 38918 } 38919 // match: (XORLconst [1] (SETB x)) 38920 // cond: 38921 // result: (SETAE x) 38922 for { 38923 if v.AuxInt != 1 { 38924 break 38925 } 38926 v_0 := v.Args[0] 38927 if v_0.Op != OpAMD64SETB { 38928 break 38929 } 38930 x := v_0.Args[0] 38931 v.reset(OpAMD64SETAE) 38932 v.AddArg(x) 38933 return true 38934 } 38935 // match: (XORLconst [1] (SETAE x)) 38936 // cond: 38937 // result: (SETB x) 38938 for { 38939 if v.AuxInt != 1 { 38940 break 38941 } 38942 v_0 := v.Args[0] 38943 if v_0.Op != OpAMD64SETAE { 38944 break 38945 } 38946 x := v_0.Args[0] 38947 v.reset(OpAMD64SETB) 38948 v.AddArg(x) 38949 return true 38950 } 38951 // match: (XORLconst [1] (SETBE x)) 38952 // cond: 38953 // result: (SETA x) 38954 for { 38955 if v.AuxInt != 1 { 38956 break 38957 } 38958 v_0 := v.Args[0] 38959 if v_0.Op != OpAMD64SETBE { 38960 break 38961 } 38962 x := v_0.Args[0] 38963 v.reset(OpAMD64SETA) 38964 v.AddArg(x) 38965 return true 38966 } 38967 // match: (XORLconst [1] (SETA x)) 38968 // cond: 38969 // result: (SETBE x) 38970 for { 38971 if v.AuxInt != 1 { 38972 break 38973 } 38974 v_0 := v.Args[0] 38975 if v_0.Op != OpAMD64SETA { 38976 break 38977 } 38978 x := v_0.Args[0] 38979 v.reset(OpAMD64SETBE) 38980 v.AddArg(x) 38981 return true 38982 } 38983 return false 38984 } 38985 func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { 38986 // match: (XORLconst [c] (XORLconst [d] x)) 38987 // cond: 38988 // result: (XORLconst [c ^ d] x) 38989 for { 38990 c := v.AuxInt 38991 v_0 := v.Args[0] 38992 if v_0.Op != OpAMD64XORLconst { 38993 break 38994 } 38995 d := v_0.AuxInt 38996 x := v_0.Args[0] 38997 v.reset(OpAMD64XORLconst) 38998 v.AuxInt = c ^ d 38999 v.AddArg(x) 39000 return true 39001 } 39002 // match: (XORLconst [c] x) 39003 // cond: int32(c)==0 39004 // result: x 39005 for { 39006 c := v.AuxInt 39007 x := v.Args[0] 39008 if !(int32(c) == 0) { 39009 break 39010 } 39011 v.reset(OpCopy) 39012 v.Type = x.Type 39013 v.AddArg(x) 39014 return true 39015 } 39016 // match: (XORLconst [c] (MOVLconst [d])) 39017 // cond: 39018 // result: (MOVLconst [c^d]) 39019 for { 39020 c := v.AuxInt 39021 v_0 := v.Args[0] 39022 if v_0.Op != OpAMD64MOVLconst { 39023 break 39024 } 39025 d := v_0.AuxInt 39026 v.reset(OpAMD64MOVLconst) 39027 v.AuxInt = c ^ d 39028 return true 39029 } 39030 return false 39031 } 39032 func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool { 39033 b := v.Block 39034 _ = b 39035 typ := &b.Func.Config.Types 39036 _ = typ 39037 // match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 39038 // cond: 39039 // result: (XORL x (MOVLf2i 
y)) 39040 for { 39041 off := v.AuxInt 39042 sym := v.Aux 39043 _ = v.Args[2] 39044 x := v.Args[0] 39045 ptr := v.Args[1] 39046 v_2 := v.Args[2] 39047 if v_2.Op != OpAMD64MOVSSstore { 39048 break 39049 } 39050 if v_2.AuxInt != off { 39051 break 39052 } 39053 if v_2.Aux != sym { 39054 break 39055 } 39056 _ = v_2.Args[2] 39057 if ptr != v_2.Args[0] { 39058 break 39059 } 39060 y := v_2.Args[1] 39061 v.reset(OpAMD64XORL) 39062 v.AddArg(x) 39063 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 39064 v0.AddArg(y) 39065 v.AddArg(v0) 39066 return true 39067 } 39068 return false 39069 } 39070 func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { 39071 // match: (XORQ x (MOVQconst [c])) 39072 // cond: is32Bit(c) 39073 // result: (XORQconst [c] x) 39074 for { 39075 _ = v.Args[1] 39076 x := v.Args[0] 39077 v_1 := v.Args[1] 39078 if v_1.Op != OpAMD64MOVQconst { 39079 break 39080 } 39081 c := v_1.AuxInt 39082 if !(is32Bit(c)) { 39083 break 39084 } 39085 v.reset(OpAMD64XORQconst) 39086 v.AuxInt = c 39087 v.AddArg(x) 39088 return true 39089 } 39090 // match: (XORQ (MOVQconst [c]) x) 39091 // cond: is32Bit(c) 39092 // result: (XORQconst [c] x) 39093 for { 39094 _ = v.Args[1] 39095 v_0 := v.Args[0] 39096 if v_0.Op != OpAMD64MOVQconst { 39097 break 39098 } 39099 c := v_0.AuxInt 39100 x := v.Args[1] 39101 if !(is32Bit(c)) { 39102 break 39103 } 39104 v.reset(OpAMD64XORQconst) 39105 v.AuxInt = c 39106 v.AddArg(x) 39107 return true 39108 } 39109 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) 39110 // cond: d==64-c 39111 // result: (ROLQconst x [c]) 39112 for { 39113 _ = v.Args[1] 39114 v_0 := v.Args[0] 39115 if v_0.Op != OpAMD64SHLQconst { 39116 break 39117 } 39118 c := v_0.AuxInt 39119 x := v_0.Args[0] 39120 v_1 := v.Args[1] 39121 if v_1.Op != OpAMD64SHRQconst { 39122 break 39123 } 39124 d := v_1.AuxInt 39125 if x != v_1.Args[0] { 39126 break 39127 } 39128 if !(d == 64-c) { 39129 break 39130 } 39131 v.reset(OpAMD64ROLQconst) 39132 v.AuxInt = c 39133 v.AddArg(x) 39134 return true 39135 } 39136 // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) 39137 // cond: d==64-c 39138 // result: (ROLQconst x [c]) 39139 for { 39140 _ = v.Args[1] 39141 v_0 := v.Args[0] 39142 if v_0.Op != OpAMD64SHRQconst { 39143 break 39144 } 39145 d := v_0.AuxInt 39146 x := v_0.Args[0] 39147 v_1 := v.Args[1] 39148 if v_1.Op != OpAMD64SHLQconst { 39149 break 39150 } 39151 c := v_1.AuxInt 39152 if x != v_1.Args[0] { 39153 break 39154 } 39155 if !(d == 64-c) { 39156 break 39157 } 39158 v.reset(OpAMD64ROLQconst) 39159 v.AuxInt = c 39160 v.AddArg(x) 39161 return true 39162 } 39163 // match: (XORQ x x) 39164 // cond: 39165 // result: (MOVQconst [0]) 39166 for { 39167 _ = v.Args[1] 39168 x := v.Args[0] 39169 if x != v.Args[1] { 39170 break 39171 } 39172 v.reset(OpAMD64MOVQconst) 39173 v.AuxInt = 0 39174 return true 39175 } 39176 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) 39177 // cond: canMergeLoad(v, l, x) && clobber(l) 39178 // result: (XORQmem x [off] {sym} ptr mem) 39179 for { 39180 _ = v.Args[1] 39181 x := v.Args[0] 39182 l := v.Args[1] 39183 if l.Op != OpAMD64MOVQload { 39184 break 39185 } 39186 off := l.AuxInt 39187 sym := l.Aux 39188 _ = l.Args[1] 39189 ptr := l.Args[0] 39190 mem := l.Args[1] 39191 if !(canMergeLoad(v, l, x) && clobber(l)) { 39192 break 39193 } 39194 v.reset(OpAMD64XORQmem) 39195 v.AuxInt = off 39196 v.Aux = sym 39197 v.AddArg(x) 39198 v.AddArg(ptr) 39199 v.AddArg(mem) 39200 return true 39201 } 39202 // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) 39203 // cond: canMergeLoad(v, l, x) && clobber(l) 
39204 // result: (XORQmem x [off] {sym} ptr mem) 39205 for { 39206 _ = v.Args[1] 39207 l := v.Args[0] 39208 if l.Op != OpAMD64MOVQload { 39209 break 39210 } 39211 off := l.AuxInt 39212 sym := l.Aux 39213 _ = l.Args[1] 39214 ptr := l.Args[0] 39215 mem := l.Args[1] 39216 x := v.Args[1] 39217 if !(canMergeLoad(v, l, x) && clobber(l)) { 39218 break 39219 } 39220 v.reset(OpAMD64XORQmem) 39221 v.AuxInt = off 39222 v.Aux = sym 39223 v.AddArg(x) 39224 v.AddArg(ptr) 39225 v.AddArg(mem) 39226 return true 39227 } 39228 return false 39229 } 39230 func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { 39231 // match: (XORQconst [c] (XORQconst [d] x)) 39232 // cond: 39233 // result: (XORQconst [c ^ d] x) 39234 for { 39235 c := v.AuxInt 39236 v_0 := v.Args[0] 39237 if v_0.Op != OpAMD64XORQconst { 39238 break 39239 } 39240 d := v_0.AuxInt 39241 x := v_0.Args[0] 39242 v.reset(OpAMD64XORQconst) 39243 v.AuxInt = c ^ d 39244 v.AddArg(x) 39245 return true 39246 } 39247 // match: (XORQconst [0] x) 39248 // cond: 39249 // result: x 39250 for { 39251 if v.AuxInt != 0 { 39252 break 39253 } 39254 x := v.Args[0] 39255 v.reset(OpCopy) 39256 v.Type = x.Type 39257 v.AddArg(x) 39258 return true 39259 } 39260 // match: (XORQconst [c] (MOVQconst [d])) 39261 // cond: 39262 // result: (MOVQconst [c^d]) 39263 for { 39264 c := v.AuxInt 39265 v_0 := v.Args[0] 39266 if v_0.Op != OpAMD64MOVQconst { 39267 break 39268 } 39269 d := v_0.AuxInt 39270 v.reset(OpAMD64MOVQconst) 39271 v.AuxInt = c ^ d 39272 return true 39273 } 39274 return false 39275 } 39276 func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool { 39277 b := v.Block 39278 _ = b 39279 typ := &b.Func.Config.Types 39280 _ = typ 39281 // match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 39282 // cond: 39283 // result: (XORQ x (MOVQf2i y)) 39284 for { 39285 off := v.AuxInt 39286 sym := v.Aux 39287 _ = v.Args[2] 39288 x := v.Args[0] 39289 ptr := v.Args[1] 39290 v_2 := v.Args[2] 39291 if v_2.Op != OpAMD64MOVSDstore { 39292 break 39293 } 39294 if v_2.AuxInt != off { 39295 break 39296 } 39297 if v_2.Aux != sym { 39298 break 39299 } 39300 _ = v_2.Args[2] 39301 if ptr != v_2.Args[0] { 39302 break 39303 } 39304 y := v_2.Args[1] 39305 v.reset(OpAMD64XORQ) 39306 v.AddArg(x) 39307 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 39308 v0.AddArg(y) 39309 v.AddArg(v0) 39310 return true 39311 } 39312 return false 39313 } 39314 func rewriteValueAMD64_OpAdd16_0(v *Value) bool { 39315 // match: (Add16 x y) 39316 // cond: 39317 // result: (ADDL x y) 39318 for { 39319 _ = v.Args[1] 39320 x := v.Args[0] 39321 y := v.Args[1] 39322 v.reset(OpAMD64ADDL) 39323 v.AddArg(x) 39324 v.AddArg(y) 39325 return true 39326 } 39327 } 39328 func rewriteValueAMD64_OpAdd32_0(v *Value) bool { 39329 // match: (Add32 x y) 39330 // cond: 39331 // result: (ADDL x y) 39332 for { 39333 _ = v.Args[1] 39334 x := v.Args[0] 39335 y := v.Args[1] 39336 v.reset(OpAMD64ADDL) 39337 v.AddArg(x) 39338 v.AddArg(y) 39339 return true 39340 } 39341 } 39342 func rewriteValueAMD64_OpAdd32F_0(v *Value) bool { 39343 // match: (Add32F x y) 39344 // cond: 39345 // result: (ADDSS x y) 39346 for { 39347 _ = v.Args[1] 39348 x := v.Args[0] 39349 y := v.Args[1] 39350 v.reset(OpAMD64ADDSS) 39351 v.AddArg(x) 39352 v.AddArg(y) 39353 return true 39354 } 39355 } 39356 func rewriteValueAMD64_OpAdd64_0(v *Value) bool { 39357 // match: (Add64 x y) 39358 // cond: 39359 // result: (ADDQ x y) 39360 for { 39361 _ = v.Args[1] 39362 x := v.Args[0] 39363 y := v.Args[1] 39364 v.reset(OpAMD64ADDQ) 39365 v.AddArg(x) 39366 v.AddArg(y) 
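// Note: an illustrative aside, not generated code. This part of the file
// lowers generic SSA ops onto machine ops. Add8 and Add16 share the 32-bit
// ADDL because addition commutes with truncation: the low 8/16 bits of a
// sum depend only on the low 8/16 bits of the inputs, e.g.
//
//	var a, b uint8
//	_ = uint8(uint32(a) + uint32(b)) // always equals a + b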
39367 return true 39368 } 39369 } 39370 func rewriteValueAMD64_OpAdd64F_0(v *Value) bool { 39371 // match: (Add64F x y) 39372 // cond: 39373 // result: (ADDSD x y) 39374 for { 39375 _ = v.Args[1] 39376 x := v.Args[0] 39377 y := v.Args[1] 39378 v.reset(OpAMD64ADDSD) 39379 v.AddArg(x) 39380 v.AddArg(y) 39381 return true 39382 } 39383 } 39384 func rewriteValueAMD64_OpAdd8_0(v *Value) bool { 39385 // match: (Add8 x y) 39386 // cond: 39387 // result: (ADDL x y) 39388 for { 39389 _ = v.Args[1] 39390 x := v.Args[0] 39391 y := v.Args[1] 39392 v.reset(OpAMD64ADDL) 39393 v.AddArg(x) 39394 v.AddArg(y) 39395 return true 39396 } 39397 } 39398 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool { 39399 b := v.Block 39400 _ = b 39401 config := b.Func.Config 39402 _ = config 39403 // match: (AddPtr x y) 39404 // cond: config.PtrSize == 8 39405 // result: (ADDQ x y) 39406 for { 39407 _ = v.Args[1] 39408 x := v.Args[0] 39409 y := v.Args[1] 39410 if !(config.PtrSize == 8) { 39411 break 39412 } 39413 v.reset(OpAMD64ADDQ) 39414 v.AddArg(x) 39415 v.AddArg(y) 39416 return true 39417 } 39418 // match: (AddPtr x y) 39419 // cond: config.PtrSize == 4 39420 // result: (ADDL x y) 39421 for { 39422 _ = v.Args[1] 39423 x := v.Args[0] 39424 y := v.Args[1] 39425 if !(config.PtrSize == 4) { 39426 break 39427 } 39428 v.reset(OpAMD64ADDL) 39429 v.AddArg(x) 39430 v.AddArg(y) 39431 return true 39432 } 39433 return false 39434 } 39435 func rewriteValueAMD64_OpAddr_0(v *Value) bool { 39436 b := v.Block 39437 _ = b 39438 config := b.Func.Config 39439 _ = config 39440 // match: (Addr {sym} base) 39441 // cond: config.PtrSize == 8 39442 // result: (LEAQ {sym} base) 39443 for { 39444 sym := v.Aux 39445 base := v.Args[0] 39446 if !(config.PtrSize == 8) { 39447 break 39448 } 39449 v.reset(OpAMD64LEAQ) 39450 v.Aux = sym 39451 v.AddArg(base) 39452 return true 39453 } 39454 // match: (Addr {sym} base) 39455 // cond: config.PtrSize == 4 39456 // result: (LEAL {sym} base) 39457 for { 39458 sym := v.Aux 39459 base := v.Args[0] 39460 if !(config.PtrSize == 4) { 39461 break 39462 } 39463 v.reset(OpAMD64LEAL) 39464 v.Aux = sym 39465 v.AddArg(base) 39466 return true 39467 } 39468 return false 39469 } 39470 func rewriteValueAMD64_OpAnd16_0(v *Value) bool { 39471 // match: (And16 x y) 39472 // cond: 39473 // result: (ANDL x y) 39474 for { 39475 _ = v.Args[1] 39476 x := v.Args[0] 39477 y := v.Args[1] 39478 v.reset(OpAMD64ANDL) 39479 v.AddArg(x) 39480 v.AddArg(y) 39481 return true 39482 } 39483 } 39484 func rewriteValueAMD64_OpAnd32_0(v *Value) bool { 39485 // match: (And32 x y) 39486 // cond: 39487 // result: (ANDL x y) 39488 for { 39489 _ = v.Args[1] 39490 x := v.Args[0] 39491 y := v.Args[1] 39492 v.reset(OpAMD64ANDL) 39493 v.AddArg(x) 39494 v.AddArg(y) 39495 return true 39496 } 39497 } 39498 func rewriteValueAMD64_OpAnd64_0(v *Value) bool { 39499 // match: (And64 x y) 39500 // cond: 39501 // result: (ANDQ x y) 39502 for { 39503 _ = v.Args[1] 39504 x := v.Args[0] 39505 y := v.Args[1] 39506 v.reset(OpAMD64ANDQ) 39507 v.AddArg(x) 39508 v.AddArg(y) 39509 return true 39510 } 39511 } 39512 func rewriteValueAMD64_OpAnd8_0(v *Value) bool { 39513 // match: (And8 x y) 39514 // cond: 39515 // result: (ANDL x y) 39516 for { 39517 _ = v.Args[1] 39518 x := v.Args[0] 39519 y := v.Args[1] 39520 v.reset(OpAMD64ANDL) 39521 v.AddArg(x) 39522 v.AddArg(y) 39523 return true 39524 } 39525 } 39526 func rewriteValueAMD64_OpAndB_0(v *Value) bool { 39527 // match: (AndB x y) 39528 // cond: 39529 // result: (ANDL x y) 39530 for { 39531 _ = v.Args[1] 39532 x := v.Args[0] 
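// Note: an illustrative aside, not generated code. AndB can reuse the
// integer ANDL because booleans are materialized as the bytes 0 and 1,
// so bitwise AND coincides with logical AND:
//
//	t, f := 1, 0 // materialized true and false
//	_ = t & f    // == 0, matching t && f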
39533 y := v.Args[1] 39534 v.reset(OpAMD64ANDL) 39535 v.AddArg(x) 39536 v.AddArg(y) 39537 return true 39538 } 39539 } 39540 func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool { 39541 b := v.Block 39542 _ = b 39543 typ := &b.Func.Config.Types 39544 _ = typ 39545 // match: (AtomicAdd32 ptr val mem) 39546 // cond: 39547 // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) 39548 for { 39549 _ = v.Args[2] 39550 ptr := v.Args[0] 39551 val := v.Args[1] 39552 mem := v.Args[2] 39553 v.reset(OpAMD64AddTupleFirst32) 39554 v.AddArg(val) 39555 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) 39556 v0.AddArg(val) 39557 v0.AddArg(ptr) 39558 v0.AddArg(mem) 39559 v.AddArg(v0) 39560 return true 39561 } 39562 } 39563 func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool { 39564 b := v.Block 39565 _ = b 39566 typ := &b.Func.Config.Types 39567 _ = typ 39568 // match: (AtomicAdd64 ptr val mem) 39569 // cond: 39570 // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) 39571 for { 39572 _ = v.Args[2] 39573 ptr := v.Args[0] 39574 val := v.Args[1] 39575 mem := v.Args[2] 39576 v.reset(OpAMD64AddTupleFirst64) 39577 v.AddArg(val) 39578 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) 39579 v0.AddArg(val) 39580 v0.AddArg(ptr) 39581 v0.AddArg(mem) 39582 v.AddArg(v0) 39583 return true 39584 } 39585 } 39586 func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool { 39587 // match: (AtomicAnd8 ptr val mem) 39588 // cond: 39589 // result: (ANDBlock ptr val mem) 39590 for { 39591 _ = v.Args[2] 39592 ptr := v.Args[0] 39593 val := v.Args[1] 39594 mem := v.Args[2] 39595 v.reset(OpAMD64ANDBlock) 39596 v.AddArg(ptr) 39597 v.AddArg(val) 39598 v.AddArg(mem) 39599 return true 39600 } 39601 } 39602 func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool { 39603 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 39604 // cond: 39605 // result: (CMPXCHGLlock ptr old new_ mem) 39606 for { 39607 _ = v.Args[3] 39608 ptr := v.Args[0] 39609 old := v.Args[1] 39610 new_ := v.Args[2] 39611 mem := v.Args[3] 39612 v.reset(OpAMD64CMPXCHGLlock) 39613 v.AddArg(ptr) 39614 v.AddArg(old) 39615 v.AddArg(new_) 39616 v.AddArg(mem) 39617 return true 39618 } 39619 } 39620 func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool { 39621 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 39622 // cond: 39623 // result: (CMPXCHGQlock ptr old new_ mem) 39624 for { 39625 _ = v.Args[3] 39626 ptr := v.Args[0] 39627 old := v.Args[1] 39628 new_ := v.Args[2] 39629 mem := v.Args[3] 39630 v.reset(OpAMD64CMPXCHGQlock) 39631 v.AddArg(ptr) 39632 v.AddArg(old) 39633 v.AddArg(new_) 39634 v.AddArg(mem) 39635 return true 39636 } 39637 } 39638 func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool { 39639 // match: (AtomicExchange32 ptr val mem) 39640 // cond: 39641 // result: (XCHGL val ptr mem) 39642 for { 39643 _ = v.Args[2] 39644 ptr := v.Args[0] 39645 val := v.Args[1] 39646 mem := v.Args[2] 39647 v.reset(OpAMD64XCHGL) 39648 v.AddArg(val) 39649 v.AddArg(ptr) 39650 v.AddArg(mem) 39651 return true 39652 } 39653 } 39654 func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool { 39655 // match: (AtomicExchange64 ptr val mem) 39656 // cond: 39657 // result: (XCHGQ val ptr mem) 39658 for { 39659 _ = v.Args[2] 39660 ptr := v.Args[0] 39661 val := v.Args[1] 39662 mem := v.Args[2] 39663 v.reset(OpAMD64XCHGQ) 39664 v.AddArg(val) 39665 v.AddArg(ptr) 39666 v.AddArg(mem) 39667 return true 39668 } 39669 } 39670 func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool { 39671 // match: 
(AtomicLoad32 ptr mem) 39672 // cond: 39673 // result: (MOVLatomicload ptr mem) 39674 for { 39675 _ = v.Args[1] 39676 ptr := v.Args[0] 39677 mem := v.Args[1] 39678 v.reset(OpAMD64MOVLatomicload) 39679 v.AddArg(ptr) 39680 v.AddArg(mem) 39681 return true 39682 } 39683 } 39684 func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool { 39685 // match: (AtomicLoad64 ptr mem) 39686 // cond: 39687 // result: (MOVQatomicload ptr mem) 39688 for { 39689 _ = v.Args[1] 39690 ptr := v.Args[0] 39691 mem := v.Args[1] 39692 v.reset(OpAMD64MOVQatomicload) 39693 v.AddArg(ptr) 39694 v.AddArg(mem) 39695 return true 39696 } 39697 } 39698 func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool { 39699 b := v.Block 39700 _ = b 39701 config := b.Func.Config 39702 _ = config 39703 // match: (AtomicLoadPtr ptr mem) 39704 // cond: config.PtrSize == 8 39705 // result: (MOVQatomicload ptr mem) 39706 for { 39707 _ = v.Args[1] 39708 ptr := v.Args[0] 39709 mem := v.Args[1] 39710 if !(config.PtrSize == 8) { 39711 break 39712 } 39713 v.reset(OpAMD64MOVQatomicload) 39714 v.AddArg(ptr) 39715 v.AddArg(mem) 39716 return true 39717 } 39718 // match: (AtomicLoadPtr ptr mem) 39719 // cond: config.PtrSize == 4 39720 // result: (MOVLatomicload ptr mem) 39721 for { 39722 _ = v.Args[1] 39723 ptr := v.Args[0] 39724 mem := v.Args[1] 39725 if !(config.PtrSize == 4) { 39726 break 39727 } 39728 v.reset(OpAMD64MOVLatomicload) 39729 v.AddArg(ptr) 39730 v.AddArg(mem) 39731 return true 39732 } 39733 return false 39734 } 39735 func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool { 39736 // match: (AtomicOr8 ptr val mem) 39737 // cond: 39738 // result: (ORBlock ptr val mem) 39739 for { 39740 _ = v.Args[2] 39741 ptr := v.Args[0] 39742 val := v.Args[1] 39743 mem := v.Args[2] 39744 v.reset(OpAMD64ORBlock) 39745 v.AddArg(ptr) 39746 v.AddArg(val) 39747 v.AddArg(mem) 39748 return true 39749 } 39750 } 39751 func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool { 39752 b := v.Block 39753 _ = b 39754 typ := &b.Func.Config.Types 39755 _ = typ 39756 // match: (AtomicStore32 ptr val mem) 39757 // cond: 39758 // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem)) 39759 for { 39760 _ = v.Args[2] 39761 ptr := v.Args[0] 39762 val := v.Args[1] 39763 mem := v.Args[2] 39764 v.reset(OpSelect1) 39765 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) 39766 v0.AddArg(val) 39767 v0.AddArg(ptr) 39768 v0.AddArg(mem) 39769 v.AddArg(v0) 39770 return true 39771 } 39772 } 39773 func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool { 39774 b := v.Block 39775 _ = b 39776 typ := &b.Func.Config.Types 39777 _ = typ 39778 // match: (AtomicStore64 ptr val mem) 39779 // cond: 39780 // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem)) 39781 for { 39782 _ = v.Args[2] 39783 ptr := v.Args[0] 39784 val := v.Args[1] 39785 mem := v.Args[2] 39786 v.reset(OpSelect1) 39787 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) 39788 v0.AddArg(val) 39789 v0.AddArg(ptr) 39790 v0.AddArg(mem) 39791 v.AddArg(v0) 39792 return true 39793 } 39794 } 39795 func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool { 39796 b := v.Block 39797 _ = b 39798 config := b.Func.Config 39799 _ = config 39800 typ := &b.Func.Config.Types 39801 _ = typ 39802 // match: (AtomicStorePtrNoWB ptr val mem) 39803 // cond: config.PtrSize == 8 39804 // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) 39805 for { 39806 _ = v.Args[2] 39807 ptr := v.Args[0] 39808 val := 
v.Args[1] 39809 mem := v.Args[2] 39810 if !(config.PtrSize == 8) { 39811 break 39812 } 39813 v.reset(OpSelect1) 39814 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) 39815 v0.AddArg(val) 39816 v0.AddArg(ptr) 39817 v0.AddArg(mem) 39818 v.AddArg(v0) 39819 return true 39820 } 39821 // match: (AtomicStorePtrNoWB ptr val mem) 39822 // cond: config.PtrSize == 4 39823 // result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) 39824 for { 39825 _ = v.Args[2] 39826 ptr := v.Args[0] 39827 val := v.Args[1] 39828 mem := v.Args[2] 39829 if !(config.PtrSize == 4) { 39830 break 39831 } 39832 v.reset(OpSelect1) 39833 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem)) 39834 v0.AddArg(val) 39835 v0.AddArg(ptr) 39836 v0.AddArg(mem) 39837 v.AddArg(v0) 39838 return true 39839 } 39840 return false 39841 } 39842 func rewriteValueAMD64_OpAvg64u_0(v *Value) bool { 39843 // match: (Avg64u x y) 39844 // cond: 39845 // result: (AVGQU x y) 39846 for { 39847 _ = v.Args[1] 39848 x := v.Args[0] 39849 y := v.Args[1] 39850 v.reset(OpAMD64AVGQU) 39851 v.AddArg(x) 39852 v.AddArg(y) 39853 return true 39854 } 39855 } 39856 func rewriteValueAMD64_OpBitLen32_0(v *Value) bool { 39857 b := v.Block 39858 _ = b 39859 typ := &b.Func.Config.Types 39860 _ = typ 39861 // match: (BitLen32 x) 39862 // cond: 39863 // result: (BitLen64 (MOVLQZX <typ.UInt64> x)) 39864 for { 39865 x := v.Args[0] 39866 v.reset(OpBitLen64) 39867 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) 39868 v0.AddArg(x) 39869 v.AddArg(v0) 39870 return true 39871 } 39872 } 39873 func rewriteValueAMD64_OpBitLen64_0(v *Value) bool { 39874 b := v.Block 39875 _ = b 39876 typ := &b.Func.Config.Types 39877 _ = typ 39878 // match: (BitLen64 <t> x) 39879 // cond: 39880 // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x)))) 39881 for { 39882 t := v.Type 39883 x := v.Args[0] 39884 v.reset(OpAMD64ADDQconst) 39885 v.AuxInt = 1 39886 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) 39887 v1 := b.NewValue0(v.Pos, OpSelect0, t) 39888 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 39889 v2.AddArg(x) 39890 v1.AddArg(v2) 39891 v0.AddArg(v1) 39892 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 39893 v3.AuxInt = -1 39894 v0.AddArg(v3) 39895 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 39896 v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 39897 v5.AddArg(x) 39898 v4.AddArg(v5) 39899 v0.AddArg(v4) 39900 v.AddArg(v0) 39901 return true 39902 } 39903 } 39904 func rewriteValueAMD64_OpBswap32_0(v *Value) bool { 39905 // match: (Bswap32 x) 39906 // cond: 39907 // result: (BSWAPL x) 39908 for { 39909 x := v.Args[0] 39910 v.reset(OpAMD64BSWAPL) 39911 v.AddArg(x) 39912 return true 39913 } 39914 } 39915 func rewriteValueAMD64_OpBswap64_0(v *Value) bool { 39916 // match: (Bswap64 x) 39917 // cond: 39918 // result: (BSWAPQ x) 39919 for { 39920 x := v.Args[0] 39921 v.reset(OpAMD64BSWAPQ) 39922 v.AddArg(x) 39923 return true 39924 } 39925 } 39926 func rewriteValueAMD64_OpClosureCall_0(v *Value) bool { 39927 // match: (ClosureCall [argwid] entry closure mem) 39928 // cond: 39929 // result: (CALLclosure [argwid] entry closure mem) 39930 for { 39931 argwid := v.AuxInt 39932 _ = v.Args[2] 39933 entry := v.Args[0] 39934 closure := v.Args[1] 39935 mem := v.Args[2] 39936 v.reset(OpAMD64CALLclosure) 39937 v.AuxInt = argwid 39938 v.AddArg(entry) 39939 v.AddArg(closure) 39940 
v.AddArg(mem) 39941 return true 39942 } 39943 } 39944 func rewriteValueAMD64_OpCom16_0(v *Value) bool { 39945 // match: (Com16 x) 39946 // cond: 39947 // result: (NOTL x) 39948 for { 39949 x := v.Args[0] 39950 v.reset(OpAMD64NOTL) 39951 v.AddArg(x) 39952 return true 39953 } 39954 } 39955 func rewriteValueAMD64_OpCom32_0(v *Value) bool { 39956 // match: (Com32 x) 39957 // cond: 39958 // result: (NOTL x) 39959 for { 39960 x := v.Args[0] 39961 v.reset(OpAMD64NOTL) 39962 v.AddArg(x) 39963 return true 39964 } 39965 } 39966 func rewriteValueAMD64_OpCom64_0(v *Value) bool { 39967 // match: (Com64 x) 39968 // cond: 39969 // result: (NOTQ x) 39970 for { 39971 x := v.Args[0] 39972 v.reset(OpAMD64NOTQ) 39973 v.AddArg(x) 39974 return true 39975 } 39976 } 39977 func rewriteValueAMD64_OpCom8_0(v *Value) bool { 39978 // match: (Com8 x) 39979 // cond: 39980 // result: (NOTL x) 39981 for { 39982 x := v.Args[0] 39983 v.reset(OpAMD64NOTL) 39984 v.AddArg(x) 39985 return true 39986 } 39987 } 39988 func rewriteValueAMD64_OpConst16_0(v *Value) bool { 39989 // match: (Const16 [val]) 39990 // cond: 39991 // result: (MOVLconst [val]) 39992 for { 39993 val := v.AuxInt 39994 v.reset(OpAMD64MOVLconst) 39995 v.AuxInt = val 39996 return true 39997 } 39998 } 39999 func rewriteValueAMD64_OpConst32_0(v *Value) bool { 40000 // match: (Const32 [val]) 40001 // cond: 40002 // result: (MOVLconst [val]) 40003 for { 40004 val := v.AuxInt 40005 v.reset(OpAMD64MOVLconst) 40006 v.AuxInt = val 40007 return true 40008 } 40009 } 40010 func rewriteValueAMD64_OpConst32F_0(v *Value) bool { 40011 // match: (Const32F [val]) 40012 // cond: 40013 // result: (MOVSSconst [val]) 40014 for { 40015 val := v.AuxInt 40016 v.reset(OpAMD64MOVSSconst) 40017 v.AuxInt = val 40018 return true 40019 } 40020 } 40021 func rewriteValueAMD64_OpConst64_0(v *Value) bool { 40022 // match: (Const64 [val]) 40023 // cond: 40024 // result: (MOVQconst [val]) 40025 for { 40026 val := v.AuxInt 40027 v.reset(OpAMD64MOVQconst) 40028 v.AuxInt = val 40029 return true 40030 } 40031 } 40032 func rewriteValueAMD64_OpConst64F_0(v *Value) bool { 40033 // match: (Const64F [val]) 40034 // cond: 40035 // result: (MOVSDconst [val]) 40036 for { 40037 val := v.AuxInt 40038 v.reset(OpAMD64MOVSDconst) 40039 v.AuxInt = val 40040 return true 40041 } 40042 } 40043 func rewriteValueAMD64_OpConst8_0(v *Value) bool { 40044 // match: (Const8 [val]) 40045 // cond: 40046 // result: (MOVLconst [val]) 40047 for { 40048 val := v.AuxInt 40049 v.reset(OpAMD64MOVLconst) 40050 v.AuxInt = val 40051 return true 40052 } 40053 } 40054 func rewriteValueAMD64_OpConstBool_0(v *Value) bool { 40055 // match: (ConstBool [b]) 40056 // cond: 40057 // result: (MOVLconst [b]) 40058 for { 40059 b := v.AuxInt 40060 v.reset(OpAMD64MOVLconst) 40061 v.AuxInt = b 40062 return true 40063 } 40064 } 40065 func rewriteValueAMD64_OpConstNil_0(v *Value) bool { 40066 b := v.Block 40067 _ = b 40068 config := b.Func.Config 40069 _ = config 40070 // match: (ConstNil) 40071 // cond: config.PtrSize == 8 40072 // result: (MOVQconst [0]) 40073 for { 40074 if !(config.PtrSize == 8) { 40075 break 40076 } 40077 v.reset(OpAMD64MOVQconst) 40078 v.AuxInt = 0 40079 return true 40080 } 40081 // match: (ConstNil) 40082 // cond: config.PtrSize == 4 40083 // result: (MOVLconst [0]) 40084 for { 40085 if !(config.PtrSize == 4) { 40086 break 40087 } 40088 v.reset(OpAMD64MOVLconst) 40089 v.AuxInt = 0 40090 return true 40091 } 40092 return false 40093 } 40094 func rewriteValueAMD64_OpConvert_0(v *Value) bool { 40095 b := v.Block 40096 _ = b 40097 
config := b.Func.Config 40098 _ = config 40099 // match: (Convert <t> x mem) 40100 // cond: config.PtrSize == 8 40101 // result: (MOVQconvert <t> x mem) 40102 for { 40103 t := v.Type 40104 _ = v.Args[1] 40105 x := v.Args[0] 40106 mem := v.Args[1] 40107 if !(config.PtrSize == 8) { 40108 break 40109 } 40110 v.reset(OpAMD64MOVQconvert) 40111 v.Type = t 40112 v.AddArg(x) 40113 v.AddArg(mem) 40114 return true 40115 } 40116 // match: (Convert <t> x mem) 40117 // cond: config.PtrSize == 4 40118 // result: (MOVLconvert <t> x mem) 40119 for { 40120 t := v.Type 40121 _ = v.Args[1] 40122 x := v.Args[0] 40123 mem := v.Args[1] 40124 if !(config.PtrSize == 4) { 40125 break 40126 } 40127 v.reset(OpAMD64MOVLconvert) 40128 v.Type = t 40129 v.AddArg(x) 40130 v.AddArg(mem) 40131 return true 40132 } 40133 return false 40134 } 40135 func rewriteValueAMD64_OpCtz32_0(v *Value) bool { 40136 b := v.Block 40137 _ = b 40138 typ := &b.Func.Config.Types 40139 _ = typ 40140 // match: (Ctz32 x) 40141 // cond: 40142 // result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x))) 40143 for { 40144 x := v.Args[0] 40145 v.reset(OpSelect0) 40146 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 40147 v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64) 40148 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 40149 v2.AuxInt = 1 << 32 40150 v1.AddArg(v2) 40151 v1.AddArg(x) 40152 v0.AddArg(v1) 40153 v.AddArg(v0) 40154 return true 40155 } 40156 } 40157 func rewriteValueAMD64_OpCtz64_0(v *Value) bool { 40158 b := v.Block 40159 _ = b 40160 typ := &b.Func.Config.Types 40161 _ = typ 40162 // match: (Ctz64 <t> x) 40163 // cond: 40164 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x))) 40165 for { 40166 t := v.Type 40167 x := v.Args[0] 40168 v.reset(OpAMD64CMOVQEQ) 40169 v0 := b.NewValue0(v.Pos, OpSelect0, t) 40170 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 40171 v1.AddArg(x) 40172 v0.AddArg(v1) 40173 v.AddArg(v0) 40174 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 40175 v2.AuxInt = 64 40176 v.AddArg(v2) 40177 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 40178 v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 40179 v4.AddArg(x) 40180 v3.AddArg(v4) 40181 v.AddArg(v3) 40182 return true 40183 } 40184 } 40185 func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool { 40186 // match: (Cvt32Fto32 x) 40187 // cond: 40188 // result: (CVTTSS2SL x) 40189 for { 40190 x := v.Args[0] 40191 v.reset(OpAMD64CVTTSS2SL) 40192 v.AddArg(x) 40193 return true 40194 } 40195 } 40196 func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool { 40197 // match: (Cvt32Fto64 x) 40198 // cond: 40199 // result: (CVTTSS2SQ x) 40200 for { 40201 x := v.Args[0] 40202 v.reset(OpAMD64CVTTSS2SQ) 40203 v.AddArg(x) 40204 return true 40205 } 40206 } 40207 func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool { 40208 // match: (Cvt32Fto64F x) 40209 // cond: 40210 // result: (CVTSS2SD x) 40211 for { 40212 x := v.Args[0] 40213 v.reset(OpAMD64CVTSS2SD) 40214 v.AddArg(x) 40215 return true 40216 } 40217 } 40218 func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool { 40219 // match: (Cvt32to32F x) 40220 // cond: 40221 // result: (CVTSL2SS x) 40222 for { 40223 x := v.Args[0] 40224 v.reset(OpAMD64CVTSL2SS) 40225 v.AddArg(x) 40226 return true 40227 } 40228 } 40229 func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool { 40230 // match: (Cvt32to64F x) 40231 // cond: 40232 // result: (CVTSL2SD x) 40233 for { 40234 x := v.Args[0] 
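// The float/int conversion rules above and below are each a single
// instruction: CVTT* ("convert with truncation") rounds toward zero,
// which is exactly Go's float-to-integer conversion semantics, so no
// extra fixup code is generated. A rough model (illustrative values
// only, not part of the rule set):
//
//	_ = int32(float32(2.9))  // CVTTSS2SL -> 2
//	_ = int32(float32(-2.9)) // CVTTSS2SL -> -2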
40235 v.reset(OpAMD64CVTSL2SD) 40236 v.AddArg(x) 40237 return true 40238 } 40239 } 40240 func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool { 40241 // match: (Cvt64Fto32 x) 40242 // cond: 40243 // result: (CVTTSD2SL x) 40244 for { 40245 x := v.Args[0] 40246 v.reset(OpAMD64CVTTSD2SL) 40247 v.AddArg(x) 40248 return true 40249 } 40250 } 40251 func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool { 40252 // match: (Cvt64Fto32F x) 40253 // cond: 40254 // result: (CVTSD2SS x) 40255 for { 40256 x := v.Args[0] 40257 v.reset(OpAMD64CVTSD2SS) 40258 v.AddArg(x) 40259 return true 40260 } 40261 } 40262 func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool { 40263 // match: (Cvt64Fto64 x) 40264 // cond: 40265 // result: (CVTTSD2SQ x) 40266 for { 40267 x := v.Args[0] 40268 v.reset(OpAMD64CVTTSD2SQ) 40269 v.AddArg(x) 40270 return true 40271 } 40272 } 40273 func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool { 40274 // match: (Cvt64to32F x) 40275 // cond: 40276 // result: (CVTSQ2SS x) 40277 for { 40278 x := v.Args[0] 40279 v.reset(OpAMD64CVTSQ2SS) 40280 v.AddArg(x) 40281 return true 40282 } 40283 } 40284 func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool { 40285 // match: (Cvt64to64F x) 40286 // cond: 40287 // result: (CVTSQ2SD x) 40288 for { 40289 x := v.Args[0] 40290 v.reset(OpAMD64CVTSQ2SD) 40291 v.AddArg(x) 40292 return true 40293 } 40294 } 40295 func rewriteValueAMD64_OpDiv128u_0(v *Value) bool { 40296 // match: (Div128u xhi xlo y) 40297 // cond: 40298 // result: (DIVQU2 xhi xlo y) 40299 for { 40300 _ = v.Args[2] 40301 xhi := v.Args[0] 40302 xlo := v.Args[1] 40303 y := v.Args[2] 40304 v.reset(OpAMD64DIVQU2) 40305 v.AddArg(xhi) 40306 v.AddArg(xlo) 40307 v.AddArg(y) 40308 return true 40309 } 40310 } 40311 func rewriteValueAMD64_OpDiv16_0(v *Value) bool { 40312 b := v.Block 40313 _ = b 40314 typ := &b.Func.Config.Types 40315 _ = typ 40316 // match: (Div16 x y) 40317 // cond: 40318 // result: (Select0 (DIVW x y)) 40319 for { 40320 _ = v.Args[1] 40321 x := v.Args[0] 40322 y := v.Args[1] 40323 v.reset(OpSelect0) 40324 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 40325 v0.AddArg(x) 40326 v0.AddArg(y) 40327 v.AddArg(v0) 40328 return true 40329 } 40330 } 40331 func rewriteValueAMD64_OpDiv16u_0(v *Value) bool { 40332 b := v.Block 40333 _ = b 40334 typ := &b.Func.Config.Types 40335 _ = typ 40336 // match: (Div16u x y) 40337 // cond: 40338 // result: (Select0 (DIVWU x y)) 40339 for { 40340 _ = v.Args[1] 40341 x := v.Args[0] 40342 y := v.Args[1] 40343 v.reset(OpSelect0) 40344 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 40345 v0.AddArg(x) 40346 v0.AddArg(y) 40347 v.AddArg(v0) 40348 return true 40349 } 40350 } 40351 func rewriteValueAMD64_OpDiv32_0(v *Value) bool { 40352 b := v.Block 40353 _ = b 40354 typ := &b.Func.Config.Types 40355 _ = typ 40356 // match: (Div32 x y) 40357 // cond: 40358 // result: (Select0 (DIVL x y)) 40359 for { 40360 _ = v.Args[1] 40361 x := v.Args[0] 40362 y := v.Args[1] 40363 v.reset(OpSelect0) 40364 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 40365 v0.AddArg(x) 40366 v0.AddArg(y) 40367 v.AddArg(v0) 40368 return true 40369 } 40370 } 40371 func rewriteValueAMD64_OpDiv32F_0(v *Value) bool { 40372 // match: (Div32F x y) 40373 // cond: 40374 // result: (DIVSS x y) 40375 for { 40376 _ = v.Args[1] 40377 x := v.Args[0] 40378 y := v.Args[1] 40379 v.reset(OpAMD64DIVSS) 40380 v.AddArg(x) 40381 v.AddArg(y) 40382 return true 40383 } 40384 } 40385 func rewriteValueAMD64_OpDiv32u_0(v *Value) bool { 40386 
b := v.Block 40387 _ = b 40388 typ := &b.Func.Config.Types 40389 _ = typ 40390 // match: (Div32u x y) 40391 // cond: 40392 // result: (Select0 (DIVLU x y)) 40393 for { 40394 _ = v.Args[1] 40395 x := v.Args[0] 40396 y := v.Args[1] 40397 v.reset(OpSelect0) 40398 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 40399 v0.AddArg(x) 40400 v0.AddArg(y) 40401 v.AddArg(v0) 40402 return true 40403 } 40404 } 40405 func rewriteValueAMD64_OpDiv64_0(v *Value) bool { 40406 b := v.Block 40407 _ = b 40408 typ := &b.Func.Config.Types 40409 _ = typ 40410 // match: (Div64 x y) 40411 // cond: 40412 // result: (Select0 (DIVQ x y)) 40413 for { 40414 _ = v.Args[1] 40415 x := v.Args[0] 40416 y := v.Args[1] 40417 v.reset(OpSelect0) 40418 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 40419 v0.AddArg(x) 40420 v0.AddArg(y) 40421 v.AddArg(v0) 40422 return true 40423 } 40424 } 40425 func rewriteValueAMD64_OpDiv64F_0(v *Value) bool { 40426 // match: (Div64F x y) 40427 // cond: 40428 // result: (DIVSD x y) 40429 for { 40430 _ = v.Args[1] 40431 x := v.Args[0] 40432 y := v.Args[1] 40433 v.reset(OpAMD64DIVSD) 40434 v.AddArg(x) 40435 v.AddArg(y) 40436 return true 40437 } 40438 } 40439 func rewriteValueAMD64_OpDiv64u_0(v *Value) bool { 40440 b := v.Block 40441 _ = b 40442 typ := &b.Func.Config.Types 40443 _ = typ 40444 // match: (Div64u x y) 40445 // cond: 40446 // result: (Select0 (DIVQU x y)) 40447 for { 40448 _ = v.Args[1] 40449 x := v.Args[0] 40450 y := v.Args[1] 40451 v.reset(OpSelect0) 40452 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 40453 v0.AddArg(x) 40454 v0.AddArg(y) 40455 v.AddArg(v0) 40456 return true 40457 } 40458 } 40459 func rewriteValueAMD64_OpDiv8_0(v *Value) bool { 40460 b := v.Block 40461 _ = b 40462 typ := &b.Func.Config.Types 40463 _ = typ 40464 // match: (Div8 x y) 40465 // cond: 40466 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 40467 for { 40468 _ = v.Args[1] 40469 x := v.Args[0] 40470 y := v.Args[1] 40471 v.reset(OpSelect0) 40472 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 40473 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 40474 v1.AddArg(x) 40475 v0.AddArg(v1) 40476 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 40477 v2.AddArg(y) 40478 v0.AddArg(v2) 40479 v.AddArg(v0) 40480 return true 40481 } 40482 } 40483 func rewriteValueAMD64_OpDiv8u_0(v *Value) bool { 40484 b := v.Block 40485 _ = b 40486 typ := &b.Func.Config.Types 40487 _ = typ 40488 // match: (Div8u x y) 40489 // cond: 40490 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 40491 for { 40492 _ = v.Args[1] 40493 x := v.Args[0] 40494 y := v.Args[1] 40495 v.reset(OpSelect0) 40496 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 40497 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 40498 v1.AddArg(x) 40499 v0.AddArg(v1) 40500 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 40501 v2.AddArg(y) 40502 v0.AddArg(v2) 40503 v.AddArg(v0) 40504 return true 40505 } 40506 } 40507 func rewriteValueAMD64_OpEq16_0(v *Value) bool { 40508 b := v.Block 40509 _ = b 40510 // match: (Eq16 x y) 40511 // cond: 40512 // result: (SETEQ (CMPW x y)) 40513 for { 40514 _ = v.Args[1] 40515 x := v.Args[0] 40516 y := v.Args[1] 40517 v.reset(OpAMD64SETEQ) 40518 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 40519 v0.AddArg(x) 40520 v0.AddArg(y) 40521 v.AddArg(v0) 40522 return true 40523 } 40524 } 40525 func rewriteValueAMD64_OpEq32_0(v *Value) bool { 40526 b := 
v.Block 40527 _ = b 40528 // match: (Eq32 x y) 40529 // cond: 40530 // result: (SETEQ (CMPL x y)) 40531 for { 40532 _ = v.Args[1] 40533 x := v.Args[0] 40534 y := v.Args[1] 40535 v.reset(OpAMD64SETEQ) 40536 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 40537 v0.AddArg(x) 40538 v0.AddArg(y) 40539 v.AddArg(v0) 40540 return true 40541 } 40542 } 40543 func rewriteValueAMD64_OpEq32F_0(v *Value) bool { 40544 b := v.Block 40545 _ = b 40546 // match: (Eq32F x y) 40547 // cond: 40548 // result: (SETEQF (UCOMISS x y)) 40549 for { 40550 _ = v.Args[1] 40551 x := v.Args[0] 40552 y := v.Args[1] 40553 v.reset(OpAMD64SETEQF) 40554 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 40555 v0.AddArg(x) 40556 v0.AddArg(y) 40557 v.AddArg(v0) 40558 return true 40559 } 40560 } 40561 func rewriteValueAMD64_OpEq64_0(v *Value) bool { 40562 b := v.Block 40563 _ = b 40564 // match: (Eq64 x y) 40565 // cond: 40566 // result: (SETEQ (CMPQ x y)) 40567 for { 40568 _ = v.Args[1] 40569 x := v.Args[0] 40570 y := v.Args[1] 40571 v.reset(OpAMD64SETEQ) 40572 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 40573 v0.AddArg(x) 40574 v0.AddArg(y) 40575 v.AddArg(v0) 40576 return true 40577 } 40578 } 40579 func rewriteValueAMD64_OpEq64F_0(v *Value) bool { 40580 b := v.Block 40581 _ = b 40582 // match: (Eq64F x y) 40583 // cond: 40584 // result: (SETEQF (UCOMISD x y)) 40585 for { 40586 _ = v.Args[1] 40587 x := v.Args[0] 40588 y := v.Args[1] 40589 v.reset(OpAMD64SETEQF) 40590 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 40591 v0.AddArg(x) 40592 v0.AddArg(y) 40593 v.AddArg(v0) 40594 return true 40595 } 40596 } 40597 func rewriteValueAMD64_OpEq8_0(v *Value) bool { 40598 b := v.Block 40599 _ = b 40600 // match: (Eq8 x y) 40601 // cond: 40602 // result: (SETEQ (CMPB x y)) 40603 for { 40604 _ = v.Args[1] 40605 x := v.Args[0] 40606 y := v.Args[1] 40607 v.reset(OpAMD64SETEQ) 40608 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 40609 v0.AddArg(x) 40610 v0.AddArg(y) 40611 v.AddArg(v0) 40612 return true 40613 } 40614 } 40615 func rewriteValueAMD64_OpEqB_0(v *Value) bool { 40616 b := v.Block 40617 _ = b 40618 // match: (EqB x y) 40619 // cond: 40620 // result: (SETEQ (CMPB x y)) 40621 for { 40622 _ = v.Args[1] 40623 x := v.Args[0] 40624 y := v.Args[1] 40625 v.reset(OpAMD64SETEQ) 40626 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 40627 v0.AddArg(x) 40628 v0.AddArg(y) 40629 v.AddArg(v0) 40630 return true 40631 } 40632 } 40633 func rewriteValueAMD64_OpEqPtr_0(v *Value) bool { 40634 b := v.Block 40635 _ = b 40636 config := b.Func.Config 40637 _ = config 40638 // match: (EqPtr x y) 40639 // cond: config.PtrSize == 8 40640 // result: (SETEQ (CMPQ x y)) 40641 for { 40642 _ = v.Args[1] 40643 x := v.Args[0] 40644 y := v.Args[1] 40645 if !(config.PtrSize == 8) { 40646 break 40647 } 40648 v.reset(OpAMD64SETEQ) 40649 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 40650 v0.AddArg(x) 40651 v0.AddArg(y) 40652 v.AddArg(v0) 40653 return true 40654 } 40655 // match: (EqPtr x y) 40656 // cond: config.PtrSize == 4 40657 // result: (SETEQ (CMPL x y)) 40658 for { 40659 _ = v.Args[1] 40660 x := v.Args[0] 40661 y := v.Args[1] 40662 if !(config.PtrSize == 4) { 40663 break 40664 } 40665 v.reset(OpAMD64SETEQ) 40666 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 40667 v0.AddArg(x) 40668 v0.AddArg(y) 40669 v.AddArg(v0) 40670 return true 40671 } 40672 return false 40673 } 40674 func rewriteValueAMD64_OpGeq16_0(v *Value) bool { 40675 b := v.Block 40676 _ = b 40677 // match: (Geq16 x y) 40678 // cond: 40679 
// result: (SETGE (CMPW x y)) 40680 for { 40681 _ = v.Args[1] 40682 x := v.Args[0] 40683 y := v.Args[1] 40684 v.reset(OpAMD64SETGE) 40685 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 40686 v0.AddArg(x) 40687 v0.AddArg(y) 40688 v.AddArg(v0) 40689 return true 40690 } 40691 } 40692 func rewriteValueAMD64_OpGeq16U_0(v *Value) bool { 40693 b := v.Block 40694 _ = b 40695 // match: (Geq16U x y) 40696 // cond: 40697 // result: (SETAE (CMPW x y)) 40698 for { 40699 _ = v.Args[1] 40700 x := v.Args[0] 40701 y := v.Args[1] 40702 v.reset(OpAMD64SETAE) 40703 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 40704 v0.AddArg(x) 40705 v0.AddArg(y) 40706 v.AddArg(v0) 40707 return true 40708 } 40709 } 40710 func rewriteValueAMD64_OpGeq32_0(v *Value) bool { 40711 b := v.Block 40712 _ = b 40713 // match: (Geq32 x y) 40714 // cond: 40715 // result: (SETGE (CMPL x y)) 40716 for { 40717 _ = v.Args[1] 40718 x := v.Args[0] 40719 y := v.Args[1] 40720 v.reset(OpAMD64SETGE) 40721 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 40722 v0.AddArg(x) 40723 v0.AddArg(y) 40724 v.AddArg(v0) 40725 return true 40726 } 40727 } 40728 func rewriteValueAMD64_OpGeq32F_0(v *Value) bool { 40729 b := v.Block 40730 _ = b 40731 // match: (Geq32F x y) 40732 // cond: 40733 // result: (SETGEF (UCOMISS x y)) 40734 for { 40735 _ = v.Args[1] 40736 x := v.Args[0] 40737 y := v.Args[1] 40738 v.reset(OpAMD64SETGEF) 40739 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 40740 v0.AddArg(x) 40741 v0.AddArg(y) 40742 v.AddArg(v0) 40743 return true 40744 } 40745 } 40746 func rewriteValueAMD64_OpGeq32U_0(v *Value) bool { 40747 b := v.Block 40748 _ = b 40749 // match: (Geq32U x y) 40750 // cond: 40751 // result: (SETAE (CMPL x y)) 40752 for { 40753 _ = v.Args[1] 40754 x := v.Args[0] 40755 y := v.Args[1] 40756 v.reset(OpAMD64SETAE) 40757 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 40758 v0.AddArg(x) 40759 v0.AddArg(y) 40760 v.AddArg(v0) 40761 return true 40762 } 40763 } 40764 func rewriteValueAMD64_OpGeq64_0(v *Value) bool { 40765 b := v.Block 40766 _ = b 40767 // match: (Geq64 x y) 40768 // cond: 40769 // result: (SETGE (CMPQ x y)) 40770 for { 40771 _ = v.Args[1] 40772 x := v.Args[0] 40773 y := v.Args[1] 40774 v.reset(OpAMD64SETGE) 40775 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 40776 v0.AddArg(x) 40777 v0.AddArg(y) 40778 v.AddArg(v0) 40779 return true 40780 } 40781 } 40782 func rewriteValueAMD64_OpGeq64F_0(v *Value) bool { 40783 b := v.Block 40784 _ = b 40785 // match: (Geq64F x y) 40786 // cond: 40787 // result: (SETGEF (UCOMISD x y)) 40788 for { 40789 _ = v.Args[1] 40790 x := v.Args[0] 40791 y := v.Args[1] 40792 v.reset(OpAMD64SETGEF) 40793 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 40794 v0.AddArg(x) 40795 v0.AddArg(y) 40796 v.AddArg(v0) 40797 return true 40798 } 40799 } 40800 func rewriteValueAMD64_OpGeq64U_0(v *Value) bool { 40801 b := v.Block 40802 _ = b 40803 // match: (Geq64U x y) 40804 // cond: 40805 // result: (SETAE (CMPQ x y)) 40806 for { 40807 _ = v.Args[1] 40808 x := v.Args[0] 40809 y := v.Args[1] 40810 v.reset(OpAMD64SETAE) 40811 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 40812 v0.AddArg(x) 40813 v0.AddArg(y) 40814 v.AddArg(v0) 40815 return true 40816 } 40817 } 40818 func rewriteValueAMD64_OpGeq8_0(v *Value) bool { 40819 b := v.Block 40820 _ = b 40821 // match: (Geq8 x y) 40822 // cond: 40823 // result: (SETGE (CMPB x y)) 40824 for { 40825 _ = v.Args[1] 40826 x := v.Args[0] 40827 y := v.Args[1] 40828 v.reset(OpAMD64SETGE) 40829 v0 := b.NewValue0(v.Pos, 
OpAMD64CMPB, types.TypeFlags) 40830 v0.AddArg(x) 40831 v0.AddArg(y) 40832 v.AddArg(v0) 40833 return true 40834 } 40835 } 40836 func rewriteValueAMD64_OpGeq8U_0(v *Value) bool { 40837 b := v.Block 40838 _ = b 40839 // match: (Geq8U x y) 40840 // cond: 40841 // result: (SETAE (CMPB x y)) 40842 for { 40843 _ = v.Args[1] 40844 x := v.Args[0] 40845 y := v.Args[1] 40846 v.reset(OpAMD64SETAE) 40847 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 40848 v0.AddArg(x) 40849 v0.AddArg(y) 40850 v.AddArg(v0) 40851 return true 40852 } 40853 } 40854 func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool { 40855 // match: (GetClosurePtr) 40856 // cond: 40857 // result: (LoweredGetClosurePtr) 40858 for { 40859 v.reset(OpAMD64LoweredGetClosurePtr) 40860 return true 40861 } 40862 } 40863 func rewriteValueAMD64_OpGetG_0(v *Value) bool { 40864 // match: (GetG mem) 40865 // cond: 40866 // result: (LoweredGetG mem) 40867 for { 40868 mem := v.Args[0] 40869 v.reset(OpAMD64LoweredGetG) 40870 v.AddArg(mem) 40871 return true 40872 } 40873 } 40874 func rewriteValueAMD64_OpGreater16_0(v *Value) bool { 40875 b := v.Block 40876 _ = b 40877 // match: (Greater16 x y) 40878 // cond: 40879 // result: (SETG (CMPW x y)) 40880 for { 40881 _ = v.Args[1] 40882 x := v.Args[0] 40883 y := v.Args[1] 40884 v.reset(OpAMD64SETG) 40885 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 40886 v0.AddArg(x) 40887 v0.AddArg(y) 40888 v.AddArg(v0) 40889 return true 40890 } 40891 } 40892 func rewriteValueAMD64_OpGreater16U_0(v *Value) bool { 40893 b := v.Block 40894 _ = b 40895 // match: (Greater16U x y) 40896 // cond: 40897 // result: (SETA (CMPW x y)) 40898 for { 40899 _ = v.Args[1] 40900 x := v.Args[0] 40901 y := v.Args[1] 40902 v.reset(OpAMD64SETA) 40903 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 40904 v0.AddArg(x) 40905 v0.AddArg(y) 40906 v.AddArg(v0) 40907 return true 40908 } 40909 } 40910 func rewriteValueAMD64_OpGreater32_0(v *Value) bool { 40911 b := v.Block 40912 _ = b 40913 // match: (Greater32 x y) 40914 // cond: 40915 // result: (SETG (CMPL x y)) 40916 for { 40917 _ = v.Args[1] 40918 x := v.Args[0] 40919 y := v.Args[1] 40920 v.reset(OpAMD64SETG) 40921 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 40922 v0.AddArg(x) 40923 v0.AddArg(y) 40924 v.AddArg(v0) 40925 return true 40926 } 40927 } 40928 func rewriteValueAMD64_OpGreater32F_0(v *Value) bool { 40929 b := v.Block 40930 _ = b 40931 // match: (Greater32F x y) 40932 // cond: 40933 // result: (SETGF (UCOMISS x y)) 40934 for { 40935 _ = v.Args[1] 40936 x := v.Args[0] 40937 y := v.Args[1] 40938 v.reset(OpAMD64SETGF) 40939 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 40940 v0.AddArg(x) 40941 v0.AddArg(y) 40942 v.AddArg(v0) 40943 return true 40944 } 40945 } 40946 func rewriteValueAMD64_OpGreater32U_0(v *Value) bool { 40947 b := v.Block 40948 _ = b 40949 // match: (Greater32U x y) 40950 // cond: 40951 // result: (SETA (CMPL x y)) 40952 for { 40953 _ = v.Args[1] 40954 x := v.Args[0] 40955 y := v.Args[1] 40956 v.reset(OpAMD64SETA) 40957 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 40958 v0.AddArg(x) 40959 v0.AddArg(y) 40960 v.AddArg(v0) 40961 return true 40962 } 40963 } 40964 func rewriteValueAMD64_OpGreater64_0(v *Value) bool { 40965 b := v.Block 40966 _ = b 40967 // match: (Greater64 x y) 40968 // cond: 40969 // result: (SETG (CMPQ x y)) 40970 for { 40971 _ = v.Args[1] 40972 x := v.Args[0] 40973 y := v.Args[1] 40974 v.reset(OpAMD64SETG) 40975 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 40976 v0.AddArg(x) 40977 v0.AddArg(y) 
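// Condition-code convention for the comparison lowerings in this file:
// signed orderings use SETL/SETLE/SETG/SETGE, unsigned ones use
// SETB/SETBE/SETA/SETAE, each reading the flags of a single CMP.
// Sketch (illustrative, not generated code):
//
//	x > y (signed)   -> SETG (CMPQ x y)
//	x > y (unsigned) -> SETA (CMPQ x y)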
40978 v.AddArg(v0) 40979 return true 40980 } 40981 } 40982 func rewriteValueAMD64_OpGreater64F_0(v *Value) bool { 40983 b := v.Block 40984 _ = b 40985 // match: (Greater64F x y) 40986 // cond: 40987 // result: (SETGF (UCOMISD x y)) 40988 for { 40989 _ = v.Args[1] 40990 x := v.Args[0] 40991 y := v.Args[1] 40992 v.reset(OpAMD64SETGF) 40993 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 40994 v0.AddArg(x) 40995 v0.AddArg(y) 40996 v.AddArg(v0) 40997 return true 40998 } 40999 } 41000 func rewriteValueAMD64_OpGreater64U_0(v *Value) bool { 41001 b := v.Block 41002 _ = b 41003 // match: (Greater64U x y) 41004 // cond: 41005 // result: (SETA (CMPQ x y)) 41006 for { 41007 _ = v.Args[1] 41008 x := v.Args[0] 41009 y := v.Args[1] 41010 v.reset(OpAMD64SETA) 41011 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41012 v0.AddArg(x) 41013 v0.AddArg(y) 41014 v.AddArg(v0) 41015 return true 41016 } 41017 } 41018 func rewriteValueAMD64_OpGreater8_0(v *Value) bool { 41019 b := v.Block 41020 _ = b 41021 // match: (Greater8 x y) 41022 // cond: 41023 // result: (SETG (CMPB x y)) 41024 for { 41025 _ = v.Args[1] 41026 x := v.Args[0] 41027 y := v.Args[1] 41028 v.reset(OpAMD64SETG) 41029 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 41030 v0.AddArg(x) 41031 v0.AddArg(y) 41032 v.AddArg(v0) 41033 return true 41034 } 41035 } 41036 func rewriteValueAMD64_OpGreater8U_0(v *Value) bool { 41037 b := v.Block 41038 _ = b 41039 // match: (Greater8U x y) 41040 // cond: 41041 // result: (SETA (CMPB x y)) 41042 for { 41043 _ = v.Args[1] 41044 x := v.Args[0] 41045 y := v.Args[1] 41046 v.reset(OpAMD64SETA) 41047 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 41048 v0.AddArg(x) 41049 v0.AddArg(y) 41050 v.AddArg(v0) 41051 return true 41052 } 41053 } 41054 func rewriteValueAMD64_OpHmul32_0(v *Value) bool { 41055 // match: (Hmul32 x y) 41056 // cond: 41057 // result: (HMULL x y) 41058 for { 41059 _ = v.Args[1] 41060 x := v.Args[0] 41061 y := v.Args[1] 41062 v.reset(OpAMD64HMULL) 41063 v.AddArg(x) 41064 v.AddArg(y) 41065 return true 41066 } 41067 } 41068 func rewriteValueAMD64_OpHmul32u_0(v *Value) bool { 41069 // match: (Hmul32u x y) 41070 // cond: 41071 // result: (HMULLU x y) 41072 for { 41073 _ = v.Args[1] 41074 x := v.Args[0] 41075 y := v.Args[1] 41076 v.reset(OpAMD64HMULLU) 41077 v.AddArg(x) 41078 v.AddArg(y) 41079 return true 41080 } 41081 } 41082 func rewriteValueAMD64_OpHmul64_0(v *Value) bool { 41083 // match: (Hmul64 x y) 41084 // cond: 41085 // result: (HMULQ x y) 41086 for { 41087 _ = v.Args[1] 41088 x := v.Args[0] 41089 y := v.Args[1] 41090 v.reset(OpAMD64HMULQ) 41091 v.AddArg(x) 41092 v.AddArg(y) 41093 return true 41094 } 41095 } 41096 func rewriteValueAMD64_OpHmul64u_0(v *Value) bool { 41097 // match: (Hmul64u x y) 41098 // cond: 41099 // result: (HMULQU x y) 41100 for { 41101 _ = v.Args[1] 41102 x := v.Args[0] 41103 y := v.Args[1] 41104 v.reset(OpAMD64HMULQU) 41105 v.AddArg(x) 41106 v.AddArg(y) 41107 return true 41108 } 41109 } 41110 func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool { 41111 // match: (Int64Hi x) 41112 // cond: 41113 // result: (SHRQconst [32] x) 41114 for { 41115 x := v.Args[0] 41116 v.reset(OpAMD64SHRQconst) 41117 v.AuxInt = 32 41118 v.AddArg(x) 41119 return true 41120 } 41121 } 41122 func rewriteValueAMD64_OpInterCall_0(v *Value) bool { 41123 // match: (InterCall [argwid] entry mem) 41124 // cond: 41125 // result: (CALLinter [argwid] entry mem) 41126 for { 41127 argwid := v.AuxInt 41128 _ = v.Args[1] 41129 entry := v.Args[0] 41130 mem := v.Args[1] 41131 
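// An interface method call reaches here as InterCall with entry already
// computed (loaded from the itab); the lowering is a plain indirect
// CALLinter. Sketch of the source-level shape (illustrative only):
//
//	var r io.Reader = f
//	_, _ = r.Read(buf) // entry loaded from r's itab; CALLinter entry mem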
v.reset(OpAMD64CALLinter) 41132 v.AuxInt = argwid 41133 v.AddArg(entry) 41134 v.AddArg(mem) 41135 return true 41136 } 41137 } 41138 func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool { 41139 b := v.Block 41140 _ = b 41141 config := b.Func.Config 41142 _ = config 41143 // match: (IsInBounds idx len) 41144 // cond: config.PtrSize == 8 41145 // result: (SETB (CMPQ idx len)) 41146 for { 41147 _ = v.Args[1] 41148 idx := v.Args[0] 41149 len := v.Args[1] 41150 if !(config.PtrSize == 8) { 41151 break 41152 } 41153 v.reset(OpAMD64SETB) 41154 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41155 v0.AddArg(idx) 41156 v0.AddArg(len) 41157 v.AddArg(v0) 41158 return true 41159 } 41160 // match: (IsInBounds idx len) 41161 // cond: config.PtrSize == 4 41162 // result: (SETB (CMPL idx len)) 41163 for { 41164 _ = v.Args[1] 41165 idx := v.Args[0] 41166 len := v.Args[1] 41167 if !(config.PtrSize == 4) { 41168 break 41169 } 41170 v.reset(OpAMD64SETB) 41171 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 41172 v0.AddArg(idx) 41173 v0.AddArg(len) 41174 v.AddArg(v0) 41175 return true 41176 } 41177 return false 41178 } 41179 func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool { 41180 b := v.Block 41181 _ = b 41182 config := b.Func.Config 41183 _ = config 41184 // match: (IsNonNil p) 41185 // cond: config.PtrSize == 8 41186 // result: (SETNE (TESTQ p p)) 41187 for { 41188 p := v.Args[0] 41189 if !(config.PtrSize == 8) { 41190 break 41191 } 41192 v.reset(OpAMD64SETNE) 41193 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) 41194 v0.AddArg(p) 41195 v0.AddArg(p) 41196 v.AddArg(v0) 41197 return true 41198 } 41199 // match: (IsNonNil p) 41200 // cond: config.PtrSize == 4 41201 // result: (SETNE (TESTL p p)) 41202 for { 41203 p := v.Args[0] 41204 if !(config.PtrSize == 4) { 41205 break 41206 } 41207 v.reset(OpAMD64SETNE) 41208 v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags) 41209 v0.AddArg(p) 41210 v0.AddArg(p) 41211 v.AddArg(v0) 41212 return true 41213 } 41214 return false 41215 } 41216 func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool { 41217 b := v.Block 41218 _ = b 41219 config := b.Func.Config 41220 _ = config 41221 // match: (IsSliceInBounds idx len) 41222 // cond: config.PtrSize == 8 41223 // result: (SETBE (CMPQ idx len)) 41224 for { 41225 _ = v.Args[1] 41226 idx := v.Args[0] 41227 len := v.Args[1] 41228 if !(config.PtrSize == 8) { 41229 break 41230 } 41231 v.reset(OpAMD64SETBE) 41232 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41233 v0.AddArg(idx) 41234 v0.AddArg(len) 41235 v.AddArg(v0) 41236 return true 41237 } 41238 // match: (IsSliceInBounds idx len) 41239 // cond: config.PtrSize == 4 41240 // result: (SETBE (CMPL idx len)) 41241 for { 41242 _ = v.Args[1] 41243 idx := v.Args[0] 41244 len := v.Args[1] 41245 if !(config.PtrSize == 4) { 41246 break 41247 } 41248 v.reset(OpAMD64SETBE) 41249 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 41250 v0.AddArg(idx) 41251 v0.AddArg(len) 41252 v.AddArg(v0) 41253 return true 41254 } 41255 return false 41256 } 41257 func rewriteValueAMD64_OpLeq16_0(v *Value) bool { 41258 b := v.Block 41259 _ = b 41260 // match: (Leq16 x y) 41261 // cond: 41262 // result: (SETLE (CMPW x y)) 41263 for { 41264 _ = v.Args[1] 41265 x := v.Args[0] 41266 y := v.Args[1] 41267 v.reset(OpAMD64SETLE) 41268 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 41269 v0.AddArg(x) 41270 v0.AddArg(y) 41271 v.AddArg(v0) 41272 return true 41273 } 41274 } 41275 func rewriteValueAMD64_OpLeq16U_0(v *Value) bool { 41276 b := v.Block 41277 _ = b 41278 // 
match: (Leq16U x y) 41279 // cond: 41280 // result: (SETBE (CMPW x y)) 41281 for { 41282 _ = v.Args[1] 41283 x := v.Args[0] 41284 y := v.Args[1] 41285 v.reset(OpAMD64SETBE) 41286 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 41287 v0.AddArg(x) 41288 v0.AddArg(y) 41289 v.AddArg(v0) 41290 return true 41291 } 41292 } 41293 func rewriteValueAMD64_OpLeq32_0(v *Value) bool { 41294 b := v.Block 41295 _ = b 41296 // match: (Leq32 x y) 41297 // cond: 41298 // result: (SETLE (CMPL x y)) 41299 for { 41300 _ = v.Args[1] 41301 x := v.Args[0] 41302 y := v.Args[1] 41303 v.reset(OpAMD64SETLE) 41304 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 41305 v0.AddArg(x) 41306 v0.AddArg(y) 41307 v.AddArg(v0) 41308 return true 41309 } 41310 } 41311 func rewriteValueAMD64_OpLeq32F_0(v *Value) bool { 41312 b := v.Block 41313 _ = b 41314 // match: (Leq32F x y) 41315 // cond: 41316 // result: (SETGEF (UCOMISS y x)) 41317 for { 41318 _ = v.Args[1] 41319 x := v.Args[0] 41320 y := v.Args[1] 41321 v.reset(OpAMD64SETGEF) 41322 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 41323 v0.AddArg(y) 41324 v0.AddArg(x) 41325 v.AddArg(v0) 41326 return true 41327 } 41328 } 41329 func rewriteValueAMD64_OpLeq32U_0(v *Value) bool { 41330 b := v.Block 41331 _ = b 41332 // match: (Leq32U x y) 41333 // cond: 41334 // result: (SETBE (CMPL x y)) 41335 for { 41336 _ = v.Args[1] 41337 x := v.Args[0] 41338 y := v.Args[1] 41339 v.reset(OpAMD64SETBE) 41340 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 41341 v0.AddArg(x) 41342 v0.AddArg(y) 41343 v.AddArg(v0) 41344 return true 41345 } 41346 } 41347 func rewriteValueAMD64_OpLeq64_0(v *Value) bool { 41348 b := v.Block 41349 _ = b 41350 // match: (Leq64 x y) 41351 // cond: 41352 // result: (SETLE (CMPQ x y)) 41353 for { 41354 _ = v.Args[1] 41355 x := v.Args[0] 41356 y := v.Args[1] 41357 v.reset(OpAMD64SETLE) 41358 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41359 v0.AddArg(x) 41360 v0.AddArg(y) 41361 v.AddArg(v0) 41362 return true 41363 } 41364 } 41365 func rewriteValueAMD64_OpLeq64F_0(v *Value) bool { 41366 b := v.Block 41367 _ = b 41368 // match: (Leq64F x y) 41369 // cond: 41370 // result: (SETGEF (UCOMISD y x)) 41371 for { 41372 _ = v.Args[1] 41373 x := v.Args[0] 41374 y := v.Args[1] 41375 v.reset(OpAMD64SETGEF) 41376 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 41377 v0.AddArg(y) 41378 v0.AddArg(x) 41379 v.AddArg(v0) 41380 return true 41381 } 41382 } 41383 func rewriteValueAMD64_OpLeq64U_0(v *Value) bool { 41384 b := v.Block 41385 _ = b 41386 // match: (Leq64U x y) 41387 // cond: 41388 // result: (SETBE (CMPQ x y)) 41389 for { 41390 _ = v.Args[1] 41391 x := v.Args[0] 41392 y := v.Args[1] 41393 v.reset(OpAMD64SETBE) 41394 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41395 v0.AddArg(x) 41396 v0.AddArg(y) 41397 v.AddArg(v0) 41398 return true 41399 } 41400 } 41401 func rewriteValueAMD64_OpLeq8_0(v *Value) bool { 41402 b := v.Block 41403 _ = b 41404 // match: (Leq8 x y) 41405 // cond: 41406 // result: (SETLE (CMPB x y)) 41407 for { 41408 _ = v.Args[1] 41409 x := v.Args[0] 41410 y := v.Args[1] 41411 v.reset(OpAMD64SETLE) 41412 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 41413 v0.AddArg(x) 41414 v0.AddArg(y) 41415 v.AddArg(v0) 41416 return true 41417 } 41418 } 41419 func rewriteValueAMD64_OpLeq8U_0(v *Value) bool { 41420 b := v.Block 41421 _ = b 41422 // match: (Leq8U x y) 41423 // cond: 41424 // result: (SETBE (CMPB x y)) 41425 for { 41426 _ = v.Args[1] 41427 x := v.Args[0] 41428 y := v.Args[1] 41429 v.reset(OpAMD64SETBE) 
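// Two conventions worth noting in the rules above. Leq32F/Leq64F (and
// Less32F/Less64F) swap their operands, rewriting x <= y as y >= x with
// SETGEF, because after UCOMISS/UCOMISD an unordered (NaN) comparison
// sets CF and the GEF/GF conditions then come out false, matching Go.
// And IsInBounds/IsSliceInBounds compare unsigned (SETB/SETBE), so a
// negative index, viewed as a huge unsigned value, fails the check:
//
//	0 <= idx && idx < len  <=>  uint64(idx) < uint64(len) // SETB (CMPQ idx len)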
41430 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 41431 v0.AddArg(x) 41432 v0.AddArg(y) 41433 v.AddArg(v0) 41434 return true 41435 } 41436 } 41437 func rewriteValueAMD64_OpLess16_0(v *Value) bool { 41438 b := v.Block 41439 _ = b 41440 // match: (Less16 x y) 41441 // cond: 41442 // result: (SETL (CMPW x y)) 41443 for { 41444 _ = v.Args[1] 41445 x := v.Args[0] 41446 y := v.Args[1] 41447 v.reset(OpAMD64SETL) 41448 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 41449 v0.AddArg(x) 41450 v0.AddArg(y) 41451 v.AddArg(v0) 41452 return true 41453 } 41454 } 41455 func rewriteValueAMD64_OpLess16U_0(v *Value) bool { 41456 b := v.Block 41457 _ = b 41458 // match: (Less16U x y) 41459 // cond: 41460 // result: (SETB (CMPW x y)) 41461 for { 41462 _ = v.Args[1] 41463 x := v.Args[0] 41464 y := v.Args[1] 41465 v.reset(OpAMD64SETB) 41466 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 41467 v0.AddArg(x) 41468 v0.AddArg(y) 41469 v.AddArg(v0) 41470 return true 41471 } 41472 } 41473 func rewriteValueAMD64_OpLess32_0(v *Value) bool { 41474 b := v.Block 41475 _ = b 41476 // match: (Less32 x y) 41477 // cond: 41478 // result: (SETL (CMPL x y)) 41479 for { 41480 _ = v.Args[1] 41481 x := v.Args[0] 41482 y := v.Args[1] 41483 v.reset(OpAMD64SETL) 41484 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 41485 v0.AddArg(x) 41486 v0.AddArg(y) 41487 v.AddArg(v0) 41488 return true 41489 } 41490 } 41491 func rewriteValueAMD64_OpLess32F_0(v *Value) bool { 41492 b := v.Block 41493 _ = b 41494 // match: (Less32F x y) 41495 // cond: 41496 // result: (SETGF (UCOMISS y x)) 41497 for { 41498 _ = v.Args[1] 41499 x := v.Args[0] 41500 y := v.Args[1] 41501 v.reset(OpAMD64SETGF) 41502 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 41503 v0.AddArg(y) 41504 v0.AddArg(x) 41505 v.AddArg(v0) 41506 return true 41507 } 41508 } 41509 func rewriteValueAMD64_OpLess32U_0(v *Value) bool { 41510 b := v.Block 41511 _ = b 41512 // match: (Less32U x y) 41513 // cond: 41514 // result: (SETB (CMPL x y)) 41515 for { 41516 _ = v.Args[1] 41517 x := v.Args[0] 41518 y := v.Args[1] 41519 v.reset(OpAMD64SETB) 41520 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 41521 v0.AddArg(x) 41522 v0.AddArg(y) 41523 v.AddArg(v0) 41524 return true 41525 } 41526 } 41527 func rewriteValueAMD64_OpLess64_0(v *Value) bool { 41528 b := v.Block 41529 _ = b 41530 // match: (Less64 x y) 41531 // cond: 41532 // result: (SETL (CMPQ x y)) 41533 for { 41534 _ = v.Args[1] 41535 x := v.Args[0] 41536 y := v.Args[1] 41537 v.reset(OpAMD64SETL) 41538 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41539 v0.AddArg(x) 41540 v0.AddArg(y) 41541 v.AddArg(v0) 41542 return true 41543 } 41544 } 41545 func rewriteValueAMD64_OpLess64F_0(v *Value) bool { 41546 b := v.Block 41547 _ = b 41548 // match: (Less64F x y) 41549 // cond: 41550 // result: (SETGF (UCOMISD y x)) 41551 for { 41552 _ = v.Args[1] 41553 x := v.Args[0] 41554 y := v.Args[1] 41555 v.reset(OpAMD64SETGF) 41556 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 41557 v0.AddArg(y) 41558 v0.AddArg(x) 41559 v.AddArg(v0) 41560 return true 41561 } 41562 } 41563 func rewriteValueAMD64_OpLess64U_0(v *Value) bool { 41564 b := v.Block 41565 _ = b 41566 // match: (Less64U x y) 41567 // cond: 41568 // result: (SETB (CMPQ x y)) 41569 for { 41570 _ = v.Args[1] 41571 x := v.Args[0] 41572 y := v.Args[1] 41573 v.reset(OpAMD64SETB) 41574 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41575 v0.AddArg(x) 41576 v0.AddArg(y) 41577 v.AddArg(v0) 41578 return true 41579 } 41580 } 41581 func 
rewriteValueAMD64_OpLess8_0(v *Value) bool { 41582 b := v.Block 41583 _ = b 41584 // match: (Less8 x y) 41585 // cond: 41586 // result: (SETL (CMPB x y)) 41587 for { 41588 _ = v.Args[1] 41589 x := v.Args[0] 41590 y := v.Args[1] 41591 v.reset(OpAMD64SETL) 41592 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 41593 v0.AddArg(x) 41594 v0.AddArg(y) 41595 v.AddArg(v0) 41596 return true 41597 } 41598 } 41599 func rewriteValueAMD64_OpLess8U_0(v *Value) bool { 41600 b := v.Block 41601 _ = b 41602 // match: (Less8U x y) 41603 // cond: 41604 // result: (SETB (CMPB x y)) 41605 for { 41606 _ = v.Args[1] 41607 x := v.Args[0] 41608 y := v.Args[1] 41609 v.reset(OpAMD64SETB) 41610 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 41611 v0.AddArg(x) 41612 v0.AddArg(y) 41613 v.AddArg(v0) 41614 return true 41615 } 41616 } 41617 func rewriteValueAMD64_OpLoad_0(v *Value) bool { 41618 b := v.Block 41619 _ = b 41620 config := b.Func.Config 41621 _ = config 41622 // match: (Load <t> ptr mem) 41623 // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) 41624 // result: (MOVQload ptr mem) 41625 for { 41626 t := v.Type 41627 _ = v.Args[1] 41628 ptr := v.Args[0] 41629 mem := v.Args[1] 41630 if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) { 41631 break 41632 } 41633 v.reset(OpAMD64MOVQload) 41634 v.AddArg(ptr) 41635 v.AddArg(mem) 41636 return true 41637 } 41638 // match: (Load <t> ptr mem) 41639 // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) 41640 // result: (MOVLload ptr mem) 41641 for { 41642 t := v.Type 41643 _ = v.Args[1] 41644 ptr := v.Args[0] 41645 mem := v.Args[1] 41646 if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) { 41647 break 41648 } 41649 v.reset(OpAMD64MOVLload) 41650 v.AddArg(ptr) 41651 v.AddArg(mem) 41652 return true 41653 } 41654 // match: (Load <t> ptr mem) 41655 // cond: is16BitInt(t) 41656 // result: (MOVWload ptr mem) 41657 for { 41658 t := v.Type 41659 _ = v.Args[1] 41660 ptr := v.Args[0] 41661 mem := v.Args[1] 41662 if !(is16BitInt(t)) { 41663 break 41664 } 41665 v.reset(OpAMD64MOVWload) 41666 v.AddArg(ptr) 41667 v.AddArg(mem) 41668 return true 41669 } 41670 // match: (Load <t> ptr mem) 41671 // cond: (t.IsBoolean() || is8BitInt(t)) 41672 // result: (MOVBload ptr mem) 41673 for { 41674 t := v.Type 41675 _ = v.Args[1] 41676 ptr := v.Args[0] 41677 mem := v.Args[1] 41678 if !(t.IsBoolean() || is8BitInt(t)) { 41679 break 41680 } 41681 v.reset(OpAMD64MOVBload) 41682 v.AddArg(ptr) 41683 v.AddArg(mem) 41684 return true 41685 } 41686 // match: (Load <t> ptr mem) 41687 // cond: is32BitFloat(t) 41688 // result: (MOVSSload ptr mem) 41689 for { 41690 t := v.Type 41691 _ = v.Args[1] 41692 ptr := v.Args[0] 41693 mem := v.Args[1] 41694 if !(is32BitFloat(t)) { 41695 break 41696 } 41697 v.reset(OpAMD64MOVSSload) 41698 v.AddArg(ptr) 41699 v.AddArg(mem) 41700 return true 41701 } 41702 // match: (Load <t> ptr mem) 41703 // cond: is64BitFloat(t) 41704 // result: (MOVSDload ptr mem) 41705 for { 41706 t := v.Type 41707 _ = v.Args[1] 41708 ptr := v.Args[0] 41709 mem := v.Args[1] 41710 if !(is64BitFloat(t)) { 41711 break 41712 } 41713 v.reset(OpAMD64MOVSDload) 41714 v.AddArg(ptr) 41715 v.AddArg(mem) 41716 return true 41717 } 41718 return false 41719 } 41720 func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool { 41721 b := v.Block 41722 _ = b 41723 // match: (Lsh16x16 <t> x y) 41724 // cond: 41725 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 41726 for { 41727 t := v.Type 41728 _ = v.Args[1] 41729 x := v.Args[0] 41730 y := v.Args[1] 41731 
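// Every variable-count shift below uses one branch-free pattern: the
// hardware shift masks its count (SHLL takes y&31, SHLQ takes y&63), so
// the shifted value is ANDed with a mask that is all ones when the
// count is in range and zero otherwise, giving Go's "shift >= width
// yields 0" semantics. 32-bit model (illustrative, not generated code):
//
//	shifted := x << (y & 31) // what SHLL computes
//	var mask uint32
//	if y < 32 {              // CMPWconst y [32]: y < 32 sets CF
//		mask = ^uint32(0)    // SBBLcarrymask: SBB of a register with
//	}                        // itself materializes CF as 0 or ^0
//	result := shifted & mask // ANDL
//
// The 8- and 16-bit shifts also compare against 32: counts 16..31
// overshift into bits that the narrower result type truncates anyway.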
v.reset(OpAMD64ANDL) 41732 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 41733 v0.AddArg(x) 41734 v0.AddArg(y) 41735 v.AddArg(v0) 41736 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41737 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 41738 v2.AuxInt = 32 41739 v2.AddArg(y) 41740 v1.AddArg(v2) 41741 v.AddArg(v1) 41742 return true 41743 } 41744 } 41745 func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool { 41746 b := v.Block 41747 _ = b 41748 // match: (Lsh16x32 <t> x y) 41749 // cond: 41750 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 41751 for { 41752 t := v.Type 41753 _ = v.Args[1] 41754 x := v.Args[0] 41755 y := v.Args[1] 41756 v.reset(OpAMD64ANDL) 41757 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 41758 v0.AddArg(x) 41759 v0.AddArg(y) 41760 v.AddArg(v0) 41761 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41762 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 41763 v2.AuxInt = 32 41764 v2.AddArg(y) 41765 v1.AddArg(v2) 41766 v.AddArg(v1) 41767 return true 41768 } 41769 } 41770 func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool { 41771 b := v.Block 41772 _ = b 41773 // match: (Lsh16x64 <t> x y) 41774 // cond: 41775 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 41776 for { 41777 t := v.Type 41778 _ = v.Args[1] 41779 x := v.Args[0] 41780 y := v.Args[1] 41781 v.reset(OpAMD64ANDL) 41782 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 41783 v0.AddArg(x) 41784 v0.AddArg(y) 41785 v.AddArg(v0) 41786 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41787 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 41788 v2.AuxInt = 32 41789 v2.AddArg(y) 41790 v1.AddArg(v2) 41791 v.AddArg(v1) 41792 return true 41793 } 41794 } 41795 func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool { 41796 b := v.Block 41797 _ = b 41798 // match: (Lsh16x8 <t> x y) 41799 // cond: 41800 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 41801 for { 41802 t := v.Type 41803 _ = v.Args[1] 41804 x := v.Args[0] 41805 y := v.Args[1] 41806 v.reset(OpAMD64ANDL) 41807 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 41808 v0.AddArg(x) 41809 v0.AddArg(y) 41810 v.AddArg(v0) 41811 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41812 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 41813 v2.AuxInt = 32 41814 v2.AddArg(y) 41815 v1.AddArg(v2) 41816 v.AddArg(v1) 41817 return true 41818 } 41819 } 41820 func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool { 41821 b := v.Block 41822 _ = b 41823 // match: (Lsh32x16 <t> x y) 41824 // cond: 41825 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 41826 for { 41827 t := v.Type 41828 _ = v.Args[1] 41829 x := v.Args[0] 41830 y := v.Args[1] 41831 v.reset(OpAMD64ANDL) 41832 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 41833 v0.AddArg(x) 41834 v0.AddArg(y) 41835 v.AddArg(v0) 41836 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41837 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 41838 v2.AuxInt = 32 41839 v2.AddArg(y) 41840 v1.AddArg(v2) 41841 v.AddArg(v1) 41842 return true 41843 } 41844 } 41845 func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool { 41846 b := v.Block 41847 _ = b 41848 // match: (Lsh32x32 <t> x y) 41849 // cond: 41850 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 41851 for { 41852 t := v.Type 41853 _ = v.Args[1] 41854 x := v.Args[0] 41855 y := v.Args[1] 41856 v.reset(OpAMD64ANDL) 41857 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 41858 v0.AddArg(x) 41859 v0.AddArg(y) 41860 v.AddArg(v0) 41861 v1 := b.NewValue0(v.Pos, 
OpAMD64SBBLcarrymask, t) 41862 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 41863 v2.AuxInt = 32 41864 v2.AddArg(y) 41865 v1.AddArg(v2) 41866 v.AddArg(v1) 41867 return true 41868 } 41869 } 41870 func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool { 41871 b := v.Block 41872 _ = b 41873 // match: (Lsh32x64 <t> x y) 41874 // cond: 41875 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 41876 for { 41877 t := v.Type 41878 _ = v.Args[1] 41879 x := v.Args[0] 41880 y := v.Args[1] 41881 v.reset(OpAMD64ANDL) 41882 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 41883 v0.AddArg(x) 41884 v0.AddArg(y) 41885 v.AddArg(v0) 41886 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41887 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 41888 v2.AuxInt = 32 41889 v2.AddArg(y) 41890 v1.AddArg(v2) 41891 v.AddArg(v1) 41892 return true 41893 } 41894 } 41895 func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool { 41896 b := v.Block 41897 _ = b 41898 // match: (Lsh32x8 <t> x y) 41899 // cond: 41900 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 41901 for { 41902 t := v.Type 41903 _ = v.Args[1] 41904 x := v.Args[0] 41905 y := v.Args[1] 41906 v.reset(OpAMD64ANDL) 41907 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 41908 v0.AddArg(x) 41909 v0.AddArg(y) 41910 v.AddArg(v0) 41911 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41912 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 41913 v2.AuxInt = 32 41914 v2.AddArg(y) 41915 v1.AddArg(v2) 41916 v.AddArg(v1) 41917 return true 41918 } 41919 } 41920 func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool { 41921 b := v.Block 41922 _ = b 41923 // match: (Lsh64x16 <t> x y) 41924 // cond: 41925 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 41926 for { 41927 t := v.Type 41928 _ = v.Args[1] 41929 x := v.Args[0] 41930 y := v.Args[1] 41931 v.reset(OpAMD64ANDQ) 41932 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 41933 v0.AddArg(x) 41934 v0.AddArg(y) 41935 v.AddArg(v0) 41936 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 41937 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 41938 v2.AuxInt = 64 41939 v2.AddArg(y) 41940 v1.AddArg(v2) 41941 v.AddArg(v1) 41942 return true 41943 } 41944 } 41945 func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool { 41946 b := v.Block 41947 _ = b 41948 // match: (Lsh64x32 <t> x y) 41949 // cond: 41950 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 41951 for { 41952 t := v.Type 41953 _ = v.Args[1] 41954 x := v.Args[0] 41955 y := v.Args[1] 41956 v.reset(OpAMD64ANDQ) 41957 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 41958 v0.AddArg(x) 41959 v0.AddArg(y) 41960 v.AddArg(v0) 41961 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 41962 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 41963 v2.AuxInt = 64 41964 v2.AddArg(y) 41965 v1.AddArg(v2) 41966 v.AddArg(v1) 41967 return true 41968 } 41969 } 41970 func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool { 41971 b := v.Block 41972 _ = b 41973 // match: (Lsh64x64 <t> x y) 41974 // cond: 41975 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 41976 for { 41977 t := v.Type 41978 _ = v.Args[1] 41979 x := v.Args[0] 41980 y := v.Args[1] 41981 v.reset(OpAMD64ANDQ) 41982 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 41983 v0.AddArg(x) 41984 v0.AddArg(y) 41985 v.AddArg(v0) 41986 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 41987 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 41988 v2.AuxInt = 64 41989 v2.AddArg(y) 41990 v1.AddArg(v2) 41991 v.AddArg(v1) 41992 
return true 41993 } 41994 } 41995 func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool { 41996 b := v.Block 41997 _ = b 41998 // match: (Lsh64x8 <t> x y) 41999 // cond: 42000 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 42001 for { 42002 t := v.Type 42003 _ = v.Args[1] 42004 x := v.Args[0] 42005 y := v.Args[1] 42006 v.reset(OpAMD64ANDQ) 42007 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 42008 v0.AddArg(x) 42009 v0.AddArg(y) 42010 v.AddArg(v0) 42011 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 42012 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 42013 v2.AuxInt = 64 42014 v2.AddArg(y) 42015 v1.AddArg(v2) 42016 v.AddArg(v1) 42017 return true 42018 } 42019 } 42020 func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool { 42021 b := v.Block 42022 _ = b 42023 // match: (Lsh8x16 <t> x y) 42024 // cond: 42025 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 42026 for { 42027 t := v.Type 42028 _ = v.Args[1] 42029 x := v.Args[0] 42030 y := v.Args[1] 42031 v.reset(OpAMD64ANDL) 42032 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 42033 v0.AddArg(x) 42034 v0.AddArg(y) 42035 v.AddArg(v0) 42036 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 42037 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 42038 v2.AuxInt = 32 42039 v2.AddArg(y) 42040 v1.AddArg(v2) 42041 v.AddArg(v1) 42042 return true 42043 } 42044 } 42045 func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool { 42046 b := v.Block 42047 _ = b 42048 // match: (Lsh8x32 <t> x y) 42049 // cond: 42050 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 42051 for { 42052 t := v.Type 42053 _ = v.Args[1] 42054 x := v.Args[0] 42055 y := v.Args[1] 42056 v.reset(OpAMD64ANDL) 42057 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 42058 v0.AddArg(x) 42059 v0.AddArg(y) 42060 v.AddArg(v0) 42061 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 42062 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 42063 v2.AuxInt = 32 42064 v2.AddArg(y) 42065 v1.AddArg(v2) 42066 v.AddArg(v1) 42067 return true 42068 } 42069 } 42070 func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool { 42071 b := v.Block 42072 _ = b 42073 // match: (Lsh8x64 <t> x y) 42074 // cond: 42075 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 42076 for { 42077 t := v.Type 42078 _ = v.Args[1] 42079 x := v.Args[0] 42080 y := v.Args[1] 42081 v.reset(OpAMD64ANDL) 42082 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 42083 v0.AddArg(x) 42084 v0.AddArg(y) 42085 v.AddArg(v0) 42086 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 42087 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 42088 v2.AuxInt = 32 42089 v2.AddArg(y) 42090 v1.AddArg(v2) 42091 v.AddArg(v1) 42092 return true 42093 } 42094 } 42095 func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool { 42096 b := v.Block 42097 _ = b 42098 // match: (Lsh8x8 <t> x y) 42099 // cond: 42100 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 42101 for { 42102 t := v.Type 42103 _ = v.Args[1] 42104 x := v.Args[0] 42105 y := v.Args[1] 42106 v.reset(OpAMD64ANDL) 42107 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 42108 v0.AddArg(x) 42109 v0.AddArg(y) 42110 v.AddArg(v0) 42111 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 42112 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 42113 v2.AuxInt = 32 42114 v2.AddArg(y) 42115 v1.AddArg(v2) 42116 v.AddArg(v1) 42117 return true 42118 } 42119 } 42120 func rewriteValueAMD64_OpMod16_0(v *Value) bool { 42121 b := v.Block 42122 _ = b 42123 typ := &b.Func.Config.Types 42124 _ = typ 42125 // match: (Mod16 
x y) 42126 // cond: 42127 // result: (Select1 (DIVW x y)) 42128 for { 42129 _ = v.Args[1] 42130 x := v.Args[0] 42131 y := v.Args[1] 42132 v.reset(OpSelect1) 42133 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 42134 v0.AddArg(x) 42135 v0.AddArg(y) 42136 v.AddArg(v0) 42137 return true 42138 } 42139 } 42140 func rewriteValueAMD64_OpMod16u_0(v *Value) bool { 42141 b := v.Block 42142 _ = b 42143 typ := &b.Func.Config.Types 42144 _ = typ 42145 // match: (Mod16u x y) 42146 // cond: 42147 // result: (Select1 (DIVWU x y)) 42148 for { 42149 _ = v.Args[1] 42150 x := v.Args[0] 42151 y := v.Args[1] 42152 v.reset(OpSelect1) 42153 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 42154 v0.AddArg(x) 42155 v0.AddArg(y) 42156 v.AddArg(v0) 42157 return true 42158 } 42159 } 42160 func rewriteValueAMD64_OpMod32_0(v *Value) bool { 42161 b := v.Block 42162 _ = b 42163 typ := &b.Func.Config.Types 42164 _ = typ 42165 // match: (Mod32 x y) 42166 // cond: 42167 // result: (Select1 (DIVL x y)) 42168 for { 42169 _ = v.Args[1] 42170 x := v.Args[0] 42171 y := v.Args[1] 42172 v.reset(OpSelect1) 42173 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 42174 v0.AddArg(x) 42175 v0.AddArg(y) 42176 v.AddArg(v0) 42177 return true 42178 } 42179 } 42180 func rewriteValueAMD64_OpMod32u_0(v *Value) bool { 42181 b := v.Block 42182 _ = b 42183 typ := &b.Func.Config.Types 42184 _ = typ 42185 // match: (Mod32u x y) 42186 // cond: 42187 // result: (Select1 (DIVLU x y)) 42188 for { 42189 _ = v.Args[1] 42190 x := v.Args[0] 42191 y := v.Args[1] 42192 v.reset(OpSelect1) 42193 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 42194 v0.AddArg(x) 42195 v0.AddArg(y) 42196 v.AddArg(v0) 42197 return true 42198 } 42199 } 42200 func rewriteValueAMD64_OpMod64_0(v *Value) bool { 42201 b := v.Block 42202 _ = b 42203 typ := &b.Func.Config.Types 42204 _ = typ 42205 // match: (Mod64 x y) 42206 // cond: 42207 // result: (Select1 (DIVQ x y)) 42208 for { 42209 _ = v.Args[1] 42210 x := v.Args[0] 42211 y := v.Args[1] 42212 v.reset(OpSelect1) 42213 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 42214 v0.AddArg(x) 42215 v0.AddArg(y) 42216 v.AddArg(v0) 42217 return true 42218 } 42219 } 42220 func rewriteValueAMD64_OpMod64u_0(v *Value) bool { 42221 b := v.Block 42222 _ = b 42223 typ := &b.Func.Config.Types 42224 _ = typ 42225 // match: (Mod64u x y) 42226 // cond: 42227 // result: (Select1 (DIVQU x y)) 42228 for { 42229 _ = v.Args[1] 42230 x := v.Args[0] 42231 y := v.Args[1] 42232 v.reset(OpSelect1) 42233 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 42234 v0.AddArg(x) 42235 v0.AddArg(y) 42236 v.AddArg(v0) 42237 return true 42238 } 42239 } 42240 func rewriteValueAMD64_OpMod8_0(v *Value) bool { 42241 b := v.Block 42242 _ = b 42243 typ := &b.Func.Config.Types 42244 _ = typ 42245 // match: (Mod8 x y) 42246 // cond: 42247 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 42248 for { 42249 _ = v.Args[1] 42250 x := v.Args[0] 42251 y := v.Args[1] 42252 v.reset(OpSelect1) 42253 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 42254 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 42255 v1.AddArg(x) 42256 v0.AddArg(v1) 42257 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 42258 v2.AddArg(y) 42259 v0.AddArg(v2) 42260 v.AddArg(v0) 42261 return true 42262 } 42263 } 42264 func rewriteValueAMD64_OpMod8u_0(v *Value) bool { 42265 b := v.Block 42266 _ = b 42267 
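// The DIV* machine ops return a (quotient, remainder) tuple: the Div*
// rules earlier take Select0 and the Mod* rules here take Select1, so
// x/y and x%y over the same operands share one divide. 8-bit operands
// are first widened to 16 bits (SignExt8to16/ZeroExt8to16) rather than
// using the AH-based byte divide. Sketch (illustrative):
//
//	q := x / y // Select0 (DIVW x y)
//	r := x % y // Select1 (DIVW x y)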
typ := &b.Func.Config.Types 42268 _ = typ 42269 // match: (Mod8u x y) 42270 // cond: 42271 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 42272 for { 42273 _ = v.Args[1] 42274 x := v.Args[0] 42275 y := v.Args[1] 42276 v.reset(OpSelect1) 42277 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 42278 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 42279 v1.AddArg(x) 42280 v0.AddArg(v1) 42281 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 42282 v2.AddArg(y) 42283 v0.AddArg(v2) 42284 v.AddArg(v0) 42285 return true 42286 } 42287 } 42288 func rewriteValueAMD64_OpMove_0(v *Value) bool { 42289 b := v.Block 42290 _ = b 42291 config := b.Func.Config 42292 _ = config 42293 typ := &b.Func.Config.Types 42294 _ = typ 42295 // match: (Move [0] _ _ mem) 42296 // cond: 42297 // result: mem 42298 for { 42299 if v.AuxInt != 0 { 42300 break 42301 } 42302 _ = v.Args[2] 42303 mem := v.Args[2] 42304 v.reset(OpCopy) 42305 v.Type = mem.Type 42306 v.AddArg(mem) 42307 return true 42308 } 42309 // match: (Move [1] dst src mem) 42310 // cond: 42311 // result: (MOVBstore dst (MOVBload src mem) mem) 42312 for { 42313 if v.AuxInt != 1 { 42314 break 42315 } 42316 _ = v.Args[2] 42317 dst := v.Args[0] 42318 src := v.Args[1] 42319 mem := v.Args[2] 42320 v.reset(OpAMD64MOVBstore) 42321 v.AddArg(dst) 42322 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 42323 v0.AddArg(src) 42324 v0.AddArg(mem) 42325 v.AddArg(v0) 42326 v.AddArg(mem) 42327 return true 42328 } 42329 // match: (Move [2] dst src mem) 42330 // cond: 42331 // result: (MOVWstore dst (MOVWload src mem) mem) 42332 for { 42333 if v.AuxInt != 2 { 42334 break 42335 } 42336 _ = v.Args[2] 42337 dst := v.Args[0] 42338 src := v.Args[1] 42339 mem := v.Args[2] 42340 v.reset(OpAMD64MOVWstore) 42341 v.AddArg(dst) 42342 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 42343 v0.AddArg(src) 42344 v0.AddArg(mem) 42345 v.AddArg(v0) 42346 v.AddArg(mem) 42347 return true 42348 } 42349 // match: (Move [4] dst src mem) 42350 // cond: 42351 // result: (MOVLstore dst (MOVLload src mem) mem) 42352 for { 42353 if v.AuxInt != 4 { 42354 break 42355 } 42356 _ = v.Args[2] 42357 dst := v.Args[0] 42358 src := v.Args[1] 42359 mem := v.Args[2] 42360 v.reset(OpAMD64MOVLstore) 42361 v.AddArg(dst) 42362 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 42363 v0.AddArg(src) 42364 v0.AddArg(mem) 42365 v.AddArg(v0) 42366 v.AddArg(mem) 42367 return true 42368 } 42369 // match: (Move [8] dst src mem) 42370 // cond: 42371 // result: (MOVQstore dst (MOVQload src mem) mem) 42372 for { 42373 if v.AuxInt != 8 { 42374 break 42375 } 42376 _ = v.Args[2] 42377 dst := v.Args[0] 42378 src := v.Args[1] 42379 mem := v.Args[2] 42380 v.reset(OpAMD64MOVQstore) 42381 v.AddArg(dst) 42382 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 42383 v0.AddArg(src) 42384 v0.AddArg(mem) 42385 v.AddArg(v0) 42386 v.AddArg(mem) 42387 return true 42388 } 42389 // match: (Move [16] dst src mem) 42390 // cond: config.useSSE 42391 // result: (MOVOstore dst (MOVOload src mem) mem) 42392 for { 42393 if v.AuxInt != 16 { 42394 break 42395 } 42396 _ = v.Args[2] 42397 dst := v.Args[0] 42398 src := v.Args[1] 42399 mem := v.Args[2] 42400 if !(config.useSSE) { 42401 break 42402 } 42403 v.reset(OpAMD64MOVOstore) 42404 v.AddArg(dst) 42405 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 42406 v0.AddArg(src) 42407 v0.AddArg(mem) 42408 v.AddArg(v0) 42409 v.AddArg(mem) 42410 return true 42411 } 42412 // match: (Move [16] dst src mem) 42413 // cond: !config.useSSE 42414 // 
result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 42415 for { 42416 if v.AuxInt != 16 { 42417 break 42418 } 42419 _ = v.Args[2] 42420 dst := v.Args[0] 42421 src := v.Args[1] 42422 mem := v.Args[2] 42423 if !(!config.useSSE) { 42424 break 42425 } 42426 v.reset(OpAMD64MOVQstore) 42427 v.AuxInt = 8 42428 v.AddArg(dst) 42429 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 42430 v0.AuxInt = 8 42431 v0.AddArg(src) 42432 v0.AddArg(mem) 42433 v.AddArg(v0) 42434 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 42435 v1.AddArg(dst) 42436 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 42437 v2.AddArg(src) 42438 v2.AddArg(mem) 42439 v1.AddArg(v2) 42440 v1.AddArg(mem) 42441 v.AddArg(v1) 42442 return true 42443 } 42444 // match: (Move [3] dst src mem) 42445 // cond: 42446 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 42447 for { 42448 if v.AuxInt != 3 { 42449 break 42450 } 42451 _ = v.Args[2] 42452 dst := v.Args[0] 42453 src := v.Args[1] 42454 mem := v.Args[2] 42455 v.reset(OpAMD64MOVBstore) 42456 v.AuxInt = 2 42457 v.AddArg(dst) 42458 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 42459 v0.AuxInt = 2 42460 v0.AddArg(src) 42461 v0.AddArg(mem) 42462 v.AddArg(v0) 42463 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) 42464 v1.AddArg(dst) 42465 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 42466 v2.AddArg(src) 42467 v2.AddArg(mem) 42468 v1.AddArg(v2) 42469 v1.AddArg(mem) 42470 v.AddArg(v1) 42471 return true 42472 } 42473 // match: (Move [5] dst src mem) 42474 // cond: 42475 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 42476 for { 42477 if v.AuxInt != 5 { 42478 break 42479 } 42480 _ = v.Args[2] 42481 dst := v.Args[0] 42482 src := v.Args[1] 42483 mem := v.Args[2] 42484 v.reset(OpAMD64MOVBstore) 42485 v.AuxInt = 4 42486 v.AddArg(dst) 42487 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 42488 v0.AuxInt = 4 42489 v0.AddArg(src) 42490 v0.AddArg(mem) 42491 v.AddArg(v0) 42492 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 42493 v1.AddArg(dst) 42494 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 42495 v2.AddArg(src) 42496 v2.AddArg(mem) 42497 v1.AddArg(v2) 42498 v1.AddArg(mem) 42499 v.AddArg(v1) 42500 return true 42501 } 42502 // match: (Move [6] dst src mem) 42503 // cond: 42504 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 42505 for { 42506 if v.AuxInt != 6 { 42507 break 42508 } 42509 _ = v.Args[2] 42510 dst := v.Args[0] 42511 src := v.Args[1] 42512 mem := v.Args[2] 42513 v.reset(OpAMD64MOVWstore) 42514 v.AuxInt = 4 42515 v.AddArg(dst) 42516 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 42517 v0.AuxInt = 4 42518 v0.AddArg(src) 42519 v0.AddArg(mem) 42520 v.AddArg(v0) 42521 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 42522 v1.AddArg(dst) 42523 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 42524 v2.AddArg(src) 42525 v2.AddArg(mem) 42526 v1.AddArg(v2) 42527 v1.AddArg(mem) 42528 v.AddArg(v1) 42529 return true 42530 } 42531 return false 42532 } 42533 func rewriteValueAMD64_OpMove_10(v *Value) bool { 42534 b := v.Block 42535 _ = b 42536 config := b.Func.Config 42537 _ = config 42538 typ := &b.Func.Config.Types 42539 _ = typ 42540 // match: (Move [7] dst src mem) 42541 // cond: 42542 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 42543 for { 42544 if v.AuxInt != 7 { 42545 break 42546 } 42547 _ = 
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
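	// The next three rules handle large moves whose size is not a multiple
	// of 16: they copy the unaligned head with one or two fixed-size
	// stores, then recurse on the remaining multiple-of-16 bytes at offset
	// s%16; the aligned bulk eventually falls through to the Duff's-device
	// or REP MOVSQ rules below.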
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
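// The multiply and negate ops that follow lower 1:1 onto machine ops:
// 8/16/32-bit multiplies all become MULL and 64-bit ones MULQ, since the
// narrower results are only ever observed through their low-order bits.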
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
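// Floating-point inequality uses SETNEF rather than SETNE so that the
// unordered (NaN) outcome of UCOMISS/UCOMISD, signalled through the
// parity flag, is folded into the boolean result.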
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
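// PopCount on 8- and 16-bit values zero-extends the operand and uses the
// 32-bit POPCNTL form, presumably because there is no 8-bit POPCNT and
// the 16-bit operand encoding is best avoided.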
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
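// The unsigned right-shift rules below implement Go's shift semantics:
// the SHR result is ANDed with an SBBcarrymask of (CMPconst y [width]),
// which is all ones when y < width and zero otherwise, so oversized
// shift counts yield 0 instead of the machine's count-masked behavior.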
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
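// The signed right-shift rules (Rsh16x* above, and the 32/64/8-bit
// variants below) saturate the count instead: ORing y with the NOT of
// the carry mask turns any out-of-range count into all ones, so SAR
// fills with copies of the sign bit as the Go spec requires.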
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x16 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x32 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x64 <t> x y)
	// cond:
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
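// Select0/Select1 unpack the tuples produced when atomic adds are
// lowered: AddTupleFirst32/64 stands for "add val to the first element
// of tuple", so Select0 re-adds val to the XADD result and Select1
// simply passes the memory component through.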
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// cond:
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// cond:
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
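// Slicemask computes (x == 0) ? 0 : -1: negating x moves its "non-zero"
// information into the sign bit, and an arithmetic shift right by 63
// smears that sign across the whole word.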
func rewriteValueAMD64_OpSlicemask_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Slicemask <t> x)
	// cond:
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64SARQconst)
		v.AuxInt = 63
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
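// Store dispatches on the static size of the stored type, trying the
// floating-point cases first so that 4- and 8-byte float stores use the
// SSE store instructions rather than the integer ones.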
func rewriteValueAMD64_OpStore_0(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSub16_0(v *Value) bool {
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32_0(v *Value) bool {
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64_0(v *Value) bool {
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8_0(v *Value) bool {
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
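// Truncations are no-ops on amd64: narrower operations simply ignore the
// upper bits of a register, so each rule below rewrites to a Copy of the
// operand at the narrower type.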
func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpXor16_0(v *Value) bool {
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32_0(v *Value) bool {
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64_0(v *Value) bool {
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8_0(v *Value) bool {
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
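// Zero mirrors the Move lowering, but stores constants instead of loaded
// values; makeValAndOff(0, off) packs the zero value and the byte offset
// into a single AuxInt for the store-constant ops.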
func rewriteValueAMD64_OpZero_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [0] _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[1]
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// cond:
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// cond:
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// cond:
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// cond:
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// cond:
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// cond:
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, s-8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore destptr (MOVOconst [0]) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(destptr)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v3.AuxInt = 0
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
	for {
		if v.AuxInt != 48 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 32
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 16
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v5.AddArg(destptr)
		v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v6.AuxInt = 0
		v5.AddArg(v6)
		v5.AddArg(mem)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
	for {
		if v.AuxInt != 64 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 48
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 32
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v6.AuxInt = 16
		v6.AddArg(destptr)
		v5.AddArg(v6)
		v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v7.AuxInt = 0
		v5.AddArg(v7)
		v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v8.AddArg(destptr)
		v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v9.AuxInt = 0
		v8.AddArg(v9)
		v8.AddArg(mem)
		v5.AddArg(v8)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZero_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteBlockAMD64(b *Block) bool {
	config := b.Func.Config
	_ = config
	fe := b.Func.fe
	_ = fe
	typ := &config.Types
	_ = typ
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (UGE (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (InvertFlags cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
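			// The Flag* ops are comparison results that became known at
			// compile time; this rule and the (EQ (Flag...)) rules below fold
			// the conditional block into BlockFirst, calling swapSuccessors
			// when the statically taken edge is the "no" branch.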
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETL {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETLE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETG cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETG {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQ {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETB cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETB {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETBE {
				break
			}
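			// These BlockIf rules fold away the SET<cond> op feeding an If:
			// the block becomes the matching flags-driven kind (here ULE for
			// SETBE) and takes the flag-producing comparison as its control,
			// so the boolean result is never materialized in a register.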
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETA cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETA {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETAE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If cond yes no)
		// cond:
		// result: (NE (TESTB cond cond) yes no)
		for {
			v := b.Control
			_ = v
			cond := b.Control
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (ULT (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	}
	return false
}
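// The bit-test rewrites in rewriteBlockAMD64 turn (TEST x, c) with a
// power-of-two constant c into a BT instruction whose bit index is log2(c),
// using the isPowerOfTwo and log2 helpers from rewrite.go. The function below
// is an illustrative sketch only -- it is not generated from gen/AMD64.rules
// and its name is invented for this example; it shows the guard those rules
// apply before choosing a bit index (the !config.nacl check is omitted here
// because it needs a *Config).
func exampleBTBitIndex(c int64) (bit int64, ok bool) {
	// Mirrors the rule condition: isPowerOfTwo(c) && log2(c) < 64.
	if isPowerOfTwo(c) && log2(c) < 64 {
		return log2(c), true
	}
	return 0, false
}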