github.com/ltltlt/go-source-code@v0.0.0-20190830023027-95be009773aa/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used
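// The generated code below follows one uniform shape. rewriteValueAMD64
// dispatches on v.Op to per-op helpers; an op with more than ten rules gets
// several helpers chained with || (_0, _10, _20, ...), each holding up to
// ten rules. Every rule is emitted from a gen/AMD64.rules triple of match
// pattern, optional condition, and result, e.g.:
//
//	(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
//
// A helper rewrites v in place (v.reset plus v.AddArg) and returns true as
// soon as one rule fires; the rewrite driver in rewrite.go keeps re-applying
// these functions until no rule matches (a fixed point).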
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDLconstmem:
		return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v)
	case OpAMD64ADDLmem:
		return rewriteValueAMD64_OpAMD64ADDLmem_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDQconstmem:
		return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v)
	case OpAMD64ADDQmem:
		return rewriteValueAMD64_OpAMD64ADDQmem_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDmem:
		return rewriteValueAMD64_OpAMD64ADDSDmem_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSmem:
		return rewriteValueAMD64_OpAMD64ADDSSmem_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLmem:
		return rewriteValueAMD64_OpAMD64ANDLmem_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQmem:
		return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDmem:
		return rewriteValueAMD64_OpAMD64MULSDmem_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSmem:
		return rewriteValueAMD64_OpAMD64MULSSmem_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLmem:
		return rewriteValueAMD64_OpAMD64ORLmem_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQmem:
		return rewriteValueAMD64_OpAMD64ORQmem_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEmem:
		return rewriteValueAMD64_OpAMD64SETAEmem_0(v)
	case OpAMD64SETAmem:
		return rewriteValueAMD64_OpAMD64SETAmem_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEmem:
		return rewriteValueAMD64_OpAMD64SETBEmem_0(v)
	case OpAMD64SETBmem:
		return rewriteValueAMD64_OpAMD64SETBmem_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETEQmem:
		return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEmem:
		return rewriteValueAMD64_OpAMD64SETGEmem_0(v)
	case OpAMD64SETGmem:
		return rewriteValueAMD64_OpAMD64SETGmem_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEmem:
		return rewriteValueAMD64_OpAMD64SETLEmem_0(v)
	case OpAMD64SETLmem:
		return rewriteValueAMD64_OpAMD64SETLmem_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SETNEmem:
		return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLmem:
		return rewriteValueAMD64_OpAMD64SUBLmem_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQmem:
		return rewriteValueAMD64_OpAMD64SUBQmem_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDmem:
		return rewriteValueAMD64_OpAMD64SUBSDmem_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSmem:
		return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLmem:
		return rewriteValueAMD64_OpAMD64XORLmem_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQmem:
		return rewriteValueAMD64_OpAMD64XORQmem_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpCeil:
		return rewriteValueAMD64_OpCeil_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpWB:
		return rewriteValueAMD64_OpWB_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
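	// The next six rules recognize a pair of opposite shifts of the same
	// value as a rotate: for a 32-bit value, (x << c) + (x >> (32-c)) is a
	// rotate-left by c, with ADD acting as OR because the two shifted
	// fields occupy disjoint bits. The 16- and 8-bit variants match the
	// same shape but must also check the result type's size (t.Size())
	// and bound c, since the left shift is the 32-bit SHLLconst form.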
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
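// ADDL's rules continue in the _10 chunk, which folds a load into the add:
// (ADDL x l:(MOVLload ...)) becomes ADDLmem. This is only done when
// canMergeLoad reports that l can safely be absorbed into v (roughly: the
// load has no other uses and nothing conflicting is scheduled between the
// two); clobber(l) then invalidates the load value (it always returns true,
// so it can be chained inside the cond) so dead-code elimination removes it.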
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
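// ADDLconstmem and ADDLmem are adds that read one operand through memory.
// When the matched memory state is exactly a MOVSSstore to the same
// address, the rules below forward the just-stored 32 bits straight from
// the XMM register via MOVLf2i instead of going through memory, turning a
// store-load round trip into a register move.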
func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
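// ADDQ carries the largest add rule set: besides the constant and rotate
// folds mirroring ADDL, it strength-reduces adds into LEAQ addressing
// modes, e.g. x + (y << 3) becomes (LEAQ8 x y), x + (y << 2) becomes
// (LEAQ4 x y), and x + (y << 1) or x + y + y becomes (LEAQ2 x y). Each
// commutative pattern appears twice, once per operand order, because the
// rule generator expands commutative matches rather than canonicalizing
// argument order.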
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
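	// The two rules below merge an ADDQ with a LEAQ into LEAQ1. The guard
	// x.Op != OpSB && y.Op != OpSB keeps the SB pseudo-register (static
	// base) out of the two-register form, presumably because SB can only
	// be encoded as the base of a symbolic address, not as an index.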
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
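// ADDQconst folds its constant into whatever LEAQ form it feeds (LEAQ
// through LEAQ8) by adjusting the displacement. Every such fold is guarded
// by is32Bit(c+d) because AMD64 addressing-mode displacements are signed
// 32-bit values.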
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
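// The SSE adds repeat the two integer patterns: ADDSD/ADDSS fold a float
// load into the instruction's memory operand (ADDSDmem/ADDSSmem), and the
// mem forms forward a value just stored by MOVQstore/MOVLstore back
// through MOVQi2f/MOVLi2f rather than reloading it from memory.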
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
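	// Note (added commentary, not generated from gen/AMD64.rules): ANDL is
	// commutative, so the generator emits each operand order as its own
	// rule; the rule below is the mirror image of the one above with the
	// constant as the first argument.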
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
	// match: (BTQconst [c] x)
	// cond: c < 32
	// result: (BTLconst [c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
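// Note (added commentary, not generated from gen/AMD64.rules): in the rule
// below, BSFQ sets ZF only when its input is zero, and an input of the form
// (ORQconst [c] _) with c != 0 can never be zero, so the EQ-conditioned
// move can never fire and the whole CMOVQEQ reduces to its first operand.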
func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
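	// Note (added commentary, not generated from gen/AMD64.rules): the next
	// rule reasons about value ranges rather than exact constants: a value
	// ANDed with m lies in [0, m], so when 0 <= int8(m) and int8(m) <
	// int8(n) the comparison outcome is known to be less-than without ever
	// inspecting the operand.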
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
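	// Note (added commentary, not generated from gen/AMD64.rules): the first
	// two rules below recognize a masked, negated count: (ANDQconst [15] _)
	// yields a value in [0,15], adding -16 gives [-16,-1], and NEGQ gives
	// [1,16], so a comparison against 32 is always unsigned less-than. The
	// [-8]/[7] variant is the same argument for the range [1,8].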
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPW x (MOVLconst [c]))
	// cond:
	// result: (CMPWconst x [int64(int16(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int64(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
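// Note (added commentary, not generated from gen/AMD64.rules): the two
// CMPXCHG rules below fold constant pointer arithmetic into the
// instruction's displacement, e.g.
// (CMPXCHGLlock [8] {sym} (ADDQconst [4] ptr) old new_ mem) becomes
// (CMPXCHGLlock [12] {sym} ptr old new_ mem), provided the combined offset
// still fits in 32 bits.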
func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
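	// Note (added commentary, not generated from gen/AMD64.rules): the
	// remaining LEAQ rules collapse chained address computations: the
	// offsets add, and the symbols merge via mergeSym, which canMergeSym
	// only permits when at least one of the two symbols is nil, so a merged
	// address never references two symbols at once.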
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
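	// Note (added commentary, not generated from gen/AMD64.rules): the
	// SHLQconst rules below trade an explicit shift for a larger LEA scale
	// factor: x + (y<<1) becomes LEAQ2, x + (y<<2) becomes LEAQ4, and
	// x + (y<<3) becomes LEAQ8; both operand orders are matched.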
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
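	// Note (added commentary, not generated from gen/AMD64.rules): when a
	// constant moves out of the index operand, its contribution is
	// multiplied by the scale, so the rule below rewrites
	// (LEAQ4 [c] {s} x (ADDQconst [d] y)) to (LEAQ4 [c+4*d] {s} x y) and
	// the is32Bit check is applied to c+4*d rather than to c+d.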
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	_ = b
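	// Note (added commentary, not generated from gen/AMD64.rules): the four
	// rules below push a sign-extension into the load itself: a one-use
	// MOVBload (or a wider load of which only the low byte is needed) is
	// replaced, in the load's own block (@x.Block), by MOVBQSXload, so the
	// value arrives already extended.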
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
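	// Note (added commentary, not generated from gen/AMD64.rules): the first
	// rule below is store-to-load forwarding: a byte load that reads back
	// what a MOVBstore just wrote to the same address (matching off and sym,
	// isSamePtr on the pointers) is replaced by MOVBQSX of the stored value,
	// skipping the memory round trip.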
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
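	// (LEAQ1 ptr idx) feeding the load folds into the two-register indexed
	// form, so the address add never materializes as a separate instruction.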
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
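	// MOVBloadidx1 computes ptr+idx with no scaling, so its two address
	// operands are interchangeable; the generator therefore emits one copy
	// of the constant-folding rule for each argument order.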
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
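	// Storing a SETcc result: when the flags value has a single use, the
	// SETcc and the byte store fuse into the memory form (SETEQmem,
	// SETNEmem, ...), writing the condition byte straight to memory.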
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
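	// A byte store reads only the low 8 bits of its value, so a sign- or
	// zero-extension feeding the store is dead and is peeled off here.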
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(int64(int8(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
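	// A raw (ADDQ ptr idx) address likewise becomes the indexed store form;
	// the ptr.Op != OpSB guard keeps the static base pseudo-register out of
	// the index operand, where it cannot be encoded.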
	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x0 := v.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst {
			break
		}
		if x0_1.AuxInt != 8 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x2 := v.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst {
			break
		}
		if x2_1.AuxInt != 8 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst {
			break
		}
		if x1_1.AuxInt != 16 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst {
			break
		}
		if x0_1.AuxInt != 24 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
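	// Byte-reversal stores: four (above) or eight (below) single-byte
	// stores of w shifted by 8, 16, 24, ... at descending addresses spell
	// out w byte-swapped, so the chain collapses into one BSWAPL/BSWAPQ
	// plus a single wide store.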
	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x6 := v.Args[2]
		if x6.Op != OpAMD64MOVBstore {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[2]
		if p != x6.Args[0] {
			break
		}
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst {
			break
		}
		if x6_1.AuxInt != 8 {
			break
		}
		if w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[2]
		if p != x5.Args[0] {
			break
		}
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst {
			break
		}
		if x5_1.AuxInt != 16 {
			break
		}
		if w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[2]
		if p != x4.Args[0] {
			break
		}
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst {
			break
		}
		if x4_1.AuxInt != 24 {
			break
		}
		if w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[2]
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst {
			break
		}
		if x3_1.AuxInt != 32 {
			break
		}
		if w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst {
			break
		}
		if x2_1.AuxInt != 40 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst {
			break
		}
		if x1_1.AuxInt != 48 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst {
			break
		}
		if x0_1.AuxInt != 56 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 8 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
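	// Two adjacent byte copies through the same memory (loads at j-1 and j,
	// stores at i-1 and i) merge into one 16-bit copy: a single MOVWload
	// feeding a single MOVWstore.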
	// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVBstore {
			break
		}
		if mem2.AuxInt != i-1 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVBload {
			break
		}
		if x2.AuxInt != j-1 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = j - 1
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
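	// MOVBstoreconst carries both the constant being stored and the store
	// offset packed into AuxInt as a ValAndOff; canAdd/add fold further
	// displacements only while the combined offset still fits.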
	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
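	// The constant offset folds identically whether the ADDQconst feeds the
	// ptr or the idx operand; the last rule in this function then fuses two
	// adjacent constant byte stores into a single MOVWstoreconstidx1.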
	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
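	// Storing w at [i] and w>>8 at [i-1] writes the two bytes of w swapped;
	// the rule below replaces the pair with one 16-bit store of
	// (ROLWconst [8] w), and the BSWAPL/BSWAPQ variants after it handle the
	// four- and eight-byte chains.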
	// match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x0 := v.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRWconst {
			break
		}
		if x0_2.AuxInt != 8 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x2 := v.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRLconst {
			break
		}
		if x2_2.AuxInt != 8 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRLconst {
			break
		}
		if x1_2.AuxInt != 16 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRLconst {
			break
		}
		if x0_2.AuxInt != 24 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x6 := v.Args[3]
		if x6.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[3]
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		x6_2 := x6.Args[2]
		if x6_2.Op != OpAMD64SHRQconst {
			break
		}
		if x6_2.AuxInt != 8 {
			break
		}
		if w != x6_2.Args[0] {
			break
		}
		x5 := x6.Args[3]
		if x5.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[3]
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		x5_2 := x5.Args[2]
		if x5_2.Op != OpAMD64SHRQconst {
			break
		}
		if x5_2.AuxInt != 16 {
			break
		}
		if w != x5_2.Args[0] {
			break
		}
		x4 := x5.Args[3]
		if x4.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[3]
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		x4_2 := x4.Args[2]
		if x4_2.Op != OpAMD64SHRQconst {
			break
		}
		if x4_2.AuxInt != 24 {
			break
		}
		if w != x4_2.Args[0] {
			break
		}
		x3 := x4.Args[3]
		if x3.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[3]
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		x3_2 := x3.Args[2]
		if x3_2.Op != OpAMD64SHRQconst {
			break
		}
		if x3_2.AuxInt != 32 {
			break
		}
		if w != x3_2.Args[0] {
			break
		}
		x2 := x3.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRQconst {
			break
		}
		if x2_2.AuxInt != 40 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRQconst {
			break
		}
		if x1_2.AuxInt != 48 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRQconst {
			break
		}
		if x0_2.AuxInt != 56 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 8 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
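	// Sign-extending the low 32 bits of a 64-bit load is just a 32-bit
	// sign-extending load, so a single-use MOVQload feeding MOVLQSX is
	// narrowed in place below.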
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: c & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fffffff
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// cond:
	// result: (MOVLQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool {
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
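	// Every 32-bit amd64 instruction already zeroes bits 32-63 of its
	// destination register; the zeroUpper32Bits rule below uses that fact
	// to delete the explicit zero-extension entirely.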
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx4 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// cond:
	// result: (MOVLQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// cond:
	// result: (MOVWQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
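// Folding address arithmetic into an atomic load is safe here: only the
// effective-address computation changes, not the load's atomicity or
// ordering, so ADDQconst and LEAQ displacements merge as for plain loads.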
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLf2i <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLi2f <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
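	// canMergeSym holds only when at most one of the two symbols is
	// non-nil, so the mergeSym calls in the LEAQ rules below never have to
	// combine two distinct symbols.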
7531 _ = v.Args[1] 7532 v_0 := v.Args[0] 7533 if v_0.Op != OpAMD64LEAQ { 7534 break 7535 } 7536 off2 := v_0.AuxInt 7537 sym2 := v_0.Aux 7538 base := v_0.Args[0] 7539 mem := v.Args[1] 7540 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7541 break 7542 } 7543 v.reset(OpAMD64MOVLload) 7544 v.AuxInt = off1 + off2 7545 v.Aux = mergeSym(sym1, sym2) 7546 v.AddArg(base) 7547 v.AddArg(mem) 7548 return true 7549 } 7550 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7551 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7552 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7553 for { 7554 off1 := v.AuxInt 7555 sym1 := v.Aux 7556 _ = v.Args[1] 7557 v_0 := v.Args[0] 7558 if v_0.Op != OpAMD64LEAQ1 { 7559 break 7560 } 7561 off2 := v_0.AuxInt 7562 sym2 := v_0.Aux 7563 _ = v_0.Args[1] 7564 ptr := v_0.Args[0] 7565 idx := v_0.Args[1] 7566 mem := v.Args[1] 7567 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7568 break 7569 } 7570 v.reset(OpAMD64MOVLloadidx1) 7571 v.AuxInt = off1 + off2 7572 v.Aux = mergeSym(sym1, sym2) 7573 v.AddArg(ptr) 7574 v.AddArg(idx) 7575 v.AddArg(mem) 7576 return true 7577 } 7578 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 7579 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7580 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7581 for { 7582 off1 := v.AuxInt 7583 sym1 := v.Aux 7584 _ = v.Args[1] 7585 v_0 := v.Args[0] 7586 if v_0.Op != OpAMD64LEAQ4 { 7587 break 7588 } 7589 off2 := v_0.AuxInt 7590 sym2 := v_0.Aux 7591 _ = v_0.Args[1] 7592 ptr := v_0.Args[0] 7593 idx := v_0.Args[1] 7594 mem := v.Args[1] 7595 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7596 break 7597 } 7598 v.reset(OpAMD64MOVLloadidx4) 7599 v.AuxInt = off1 + off2 7600 v.Aux = mergeSym(sym1, sym2) 7601 v.AddArg(ptr) 7602 v.AddArg(idx) 7603 v.AddArg(mem) 7604 return true 7605 } 7606 // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 7607 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7608 // result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7609 for { 7610 off1 := v.AuxInt 7611 sym1 := v.Aux 7612 _ = v.Args[1] 7613 v_0 := v.Args[0] 7614 if v_0.Op != OpAMD64LEAQ8 { 7615 break 7616 } 7617 off2 := v_0.AuxInt 7618 sym2 := v_0.Aux 7619 _ = v_0.Args[1] 7620 ptr := v_0.Args[0] 7621 idx := v_0.Args[1] 7622 mem := v.Args[1] 7623 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7624 break 7625 } 7626 v.reset(OpAMD64MOVLloadidx8) 7627 v.AuxInt = off1 + off2 7628 v.Aux = mergeSym(sym1, sym2) 7629 v.AddArg(ptr) 7630 v.AddArg(idx) 7631 v.AddArg(mem) 7632 return true 7633 } 7634 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 7635 // cond: ptr.Op != OpSB 7636 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 7637 for { 7638 off := v.AuxInt 7639 sym := v.Aux 7640 _ = v.Args[1] 7641 v_0 := v.Args[0] 7642 if v_0.Op != OpAMD64ADDQ { 7643 break 7644 } 7645 _ = v_0.Args[1] 7646 ptr := v_0.Args[0] 7647 idx := v_0.Args[1] 7648 mem := v.Args[1] 7649 if !(ptr.Op != OpSB) { 7650 break 7651 } 7652 v.reset(OpAMD64MOVLloadidx1) 7653 v.AuxInt = off 7654 v.Aux = sym 7655 v.AddArg(ptr) 7656 v.AddArg(idx) 7657 v.AddArg(mem) 7658 return true 7659 } 7660 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 7661 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 7662 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7663 for { 7664 off1 := v.AuxInt 7665 sym1 := v.Aux 7666 _ = v.Args[1] 7667 v_0 := v.Args[0] 7668 if v_0.Op != OpAMD64LEAL { 7669 
break 7670 } 7671 off2 := v_0.AuxInt 7672 sym2 := v_0.Aux 7673 base := v_0.Args[0] 7674 mem := v.Args[1] 7675 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 7676 break 7677 } 7678 v.reset(OpAMD64MOVLload) 7679 v.AuxInt = off1 + off2 7680 v.Aux = mergeSym(sym1, sym2) 7681 v.AddArg(base) 7682 v.AddArg(mem) 7683 return true 7684 } 7685 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 7686 // cond: is32Bit(off1+off2) 7687 // result: (MOVLload [off1+off2] {sym} ptr mem) 7688 for { 7689 off1 := v.AuxInt 7690 sym := v.Aux 7691 _ = v.Args[1] 7692 v_0 := v.Args[0] 7693 if v_0.Op != OpAMD64ADDLconst { 7694 break 7695 } 7696 off2 := v_0.AuxInt 7697 ptr := v_0.Args[0] 7698 mem := v.Args[1] 7699 if !(is32Bit(off1 + off2)) { 7700 break 7701 } 7702 v.reset(OpAMD64MOVLload) 7703 v.AuxInt = off1 + off2 7704 v.Aux = sym 7705 v.AddArg(ptr) 7706 v.AddArg(mem) 7707 return true 7708 } 7709 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) 7710 // cond: 7711 // result: (MOVLf2i val) 7712 for { 7713 off := v.AuxInt 7714 sym := v.Aux 7715 _ = v.Args[1] 7716 ptr := v.Args[0] 7717 v_1 := v.Args[1] 7718 if v_1.Op != OpAMD64MOVSSstore { 7719 break 7720 } 7721 if v_1.AuxInt != off { 7722 break 7723 } 7724 if v_1.Aux != sym { 7725 break 7726 } 7727 _ = v_1.Args[2] 7728 if ptr != v_1.Args[0] { 7729 break 7730 } 7731 val := v_1.Args[1] 7732 v.reset(OpAMD64MOVLf2i) 7733 v.AddArg(val) 7734 return true 7735 } 7736 return false 7737 } 7738 func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { 7739 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7740 // cond: 7741 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7742 for { 7743 c := v.AuxInt 7744 sym := v.Aux 7745 _ = v.Args[2] 7746 ptr := v.Args[0] 7747 v_1 := v.Args[1] 7748 if v_1.Op != OpAMD64SHLQconst { 7749 break 7750 } 7751 if v_1.AuxInt != 2 { 7752 break 7753 } 7754 idx := v_1.Args[0] 7755 mem := v.Args[2] 7756 v.reset(OpAMD64MOVLloadidx4) 7757 v.AuxInt = c 7758 v.Aux = sym 7759 v.AddArg(ptr) 7760 v.AddArg(idx) 7761 v.AddArg(mem) 7762 return true 7763 } 7764 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) 7765 // cond: 7766 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7767 for { 7768 c := v.AuxInt 7769 sym := v.Aux 7770 _ = v.Args[2] 7771 v_0 := v.Args[0] 7772 if v_0.Op != OpAMD64SHLQconst { 7773 break 7774 } 7775 if v_0.AuxInt != 2 { 7776 break 7777 } 7778 idx := v_0.Args[0] 7779 ptr := v.Args[1] 7780 mem := v.Args[2] 7781 v.reset(OpAMD64MOVLloadidx4) 7782 v.AuxInt = c 7783 v.Aux = sym 7784 v.AddArg(ptr) 7785 v.AddArg(idx) 7786 v.AddArg(mem) 7787 return true 7788 } 7789 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7790 // cond: 7791 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 7792 for { 7793 c := v.AuxInt 7794 sym := v.Aux 7795 _ = v.Args[2] 7796 ptr := v.Args[0] 7797 v_1 := v.Args[1] 7798 if v_1.Op != OpAMD64SHLQconst { 7799 break 7800 } 7801 if v_1.AuxInt != 3 { 7802 break 7803 } 7804 idx := v_1.Args[0] 7805 mem := v.Args[2] 7806 v.reset(OpAMD64MOVLloadidx8) 7807 v.AuxInt = c 7808 v.Aux = sym 7809 v.AddArg(ptr) 7810 v.AddArg(idx) 7811 v.AddArg(mem) 7812 return true 7813 } 7814 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 7815 // cond: 7816 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 7817 for { 7818 c := v.AuxInt 7819 sym := v.Aux 7820 _ = v.Args[2] 7821 v_0 := v.Args[0] 7822 if v_0.Op != OpAMD64SHLQconst { 7823 break 7824 } 7825 if v_0.AuxInt != 3 { 7826 break 7827 } 7828 idx := v_0.Args[0] 7829 ptr := v.Args[1] 7830 mem := v.Args[2] 7831 
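// (SHLQconst [k] idx) computes idx<<k, so an unscaled idx1 access whose
// index is pre-shifted by 2 or 3 can be re-expressed with the hardware's
// x4/x8 scaled-index addressing, which is what this rewrite does.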
v.reset(OpAMD64MOVLloadidx8) 7832 v.AuxInt = c 7833 v.Aux = sym 7834 v.AddArg(ptr) 7835 v.AddArg(idx) 7836 v.AddArg(mem) 7837 return true 7838 } 7839 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7840 // cond: is32Bit(c+d) 7841 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7842 for { 7843 c := v.AuxInt 7844 sym := v.Aux 7845 _ = v.Args[2] 7846 v_0 := v.Args[0] 7847 if v_0.Op != OpAMD64ADDQconst { 7848 break 7849 } 7850 d := v_0.AuxInt 7851 ptr := v_0.Args[0] 7852 idx := v.Args[1] 7853 mem := v.Args[2] 7854 if !(is32Bit(c + d)) { 7855 break 7856 } 7857 v.reset(OpAMD64MOVLloadidx1) 7858 v.AuxInt = c + d 7859 v.Aux = sym 7860 v.AddArg(ptr) 7861 v.AddArg(idx) 7862 v.AddArg(mem) 7863 return true 7864 } 7865 // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 7866 // cond: is32Bit(c+d) 7867 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7868 for { 7869 c := v.AuxInt 7870 sym := v.Aux 7871 _ = v.Args[2] 7872 idx := v.Args[0] 7873 v_1 := v.Args[1] 7874 if v_1.Op != OpAMD64ADDQconst { 7875 break 7876 } 7877 d := v_1.AuxInt 7878 ptr := v_1.Args[0] 7879 mem := v.Args[2] 7880 if !(is32Bit(c + d)) { 7881 break 7882 } 7883 v.reset(OpAMD64MOVLloadidx1) 7884 v.AuxInt = c + d 7885 v.Aux = sym 7886 v.AddArg(ptr) 7887 v.AddArg(idx) 7888 v.AddArg(mem) 7889 return true 7890 } 7891 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7892 // cond: is32Bit(c+d) 7893 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7894 for { 7895 c := v.AuxInt 7896 sym := v.Aux 7897 _ = v.Args[2] 7898 ptr := v.Args[0] 7899 v_1 := v.Args[1] 7900 if v_1.Op != OpAMD64ADDQconst { 7901 break 7902 } 7903 d := v_1.AuxInt 7904 idx := v_1.Args[0] 7905 mem := v.Args[2] 7906 if !(is32Bit(c + d)) { 7907 break 7908 } 7909 v.reset(OpAMD64MOVLloadidx1) 7910 v.AuxInt = c + d 7911 v.Aux = sym 7912 v.AddArg(ptr) 7913 v.AddArg(idx) 7914 v.AddArg(mem) 7915 return true 7916 } 7917 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 7918 // cond: is32Bit(c+d) 7919 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7920 for { 7921 c := v.AuxInt 7922 sym := v.Aux 7923 _ = v.Args[2] 7924 v_0 := v.Args[0] 7925 if v_0.Op != OpAMD64ADDQconst { 7926 break 7927 } 7928 d := v_0.AuxInt 7929 idx := v_0.Args[0] 7930 ptr := v.Args[1] 7931 mem := v.Args[2] 7932 if !(is32Bit(c + d)) { 7933 break 7934 } 7935 v.reset(OpAMD64MOVLloadidx1) 7936 v.AuxInt = c + d 7937 v.Aux = sym 7938 v.AddArg(ptr) 7939 v.AddArg(idx) 7940 v.AddArg(mem) 7941 return true 7942 } 7943 return false 7944 } 7945 func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { 7946 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 7947 // cond: is32Bit(c+d) 7948 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 7949 for { 7950 c := v.AuxInt 7951 sym := v.Aux 7952 _ = v.Args[2] 7953 v_0 := v.Args[0] 7954 if v_0.Op != OpAMD64ADDQconst { 7955 break 7956 } 7957 d := v_0.AuxInt 7958 ptr := v_0.Args[0] 7959 idx := v.Args[1] 7960 mem := v.Args[2] 7961 if !(is32Bit(c + d)) { 7962 break 7963 } 7964 v.reset(OpAMD64MOVLloadidx4) 7965 v.AuxInt = c + d 7966 v.Aux = sym 7967 v.AddArg(ptr) 7968 v.AddArg(idx) 7969 v.AddArg(mem) 7970 return true 7971 } 7972 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 7973 // cond: is32Bit(c+4*d) 7974 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 7975 for { 7976 c := v.AuxInt 7977 sym := v.Aux 7978 _ = v.Args[2] 7979 ptr := v.Args[0] 7980 v_1 := v.Args[1] 7981 if v_1.Op != OpAMD64ADDQconst { 7982 break 7983 } 7984 d := v_1.AuxInt 7985 idx := v_1.Args[0] 7986 mem := v.Args[2] 7987 if 
!(is32Bit(c + 4*d)) { 7988 break 7989 } 7990 v.reset(OpAMD64MOVLloadidx4) 7991 v.AuxInt = c + 4*d 7992 v.Aux = sym 7993 v.AddArg(ptr) 7994 v.AddArg(idx) 7995 v.AddArg(mem) 7996 return true 7997 } 7998 return false 7999 } 8000 func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { 8001 // match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 8002 // cond: is32Bit(c+d) 8003 // result: (MOVLloadidx8 [c+d] {sym} ptr idx mem) 8004 for { 8005 c := v.AuxInt 8006 sym := v.Aux 8007 _ = v.Args[2] 8008 v_0 := v.Args[0] 8009 if v_0.Op != OpAMD64ADDQconst { 8010 break 8011 } 8012 d := v_0.AuxInt 8013 ptr := v_0.Args[0] 8014 idx := v.Args[1] 8015 mem := v.Args[2] 8016 if !(is32Bit(c + d)) { 8017 break 8018 } 8019 v.reset(OpAMD64MOVLloadidx8) 8020 v.AuxInt = c + d 8021 v.Aux = sym 8022 v.AddArg(ptr) 8023 v.AddArg(idx) 8024 v.AddArg(mem) 8025 return true 8026 } 8027 // match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 8028 // cond: is32Bit(c+8*d) 8029 // result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem) 8030 for { 8031 c := v.AuxInt 8032 sym := v.Aux 8033 _ = v.Args[2] 8034 ptr := v.Args[0] 8035 v_1 := v.Args[1] 8036 if v_1.Op != OpAMD64ADDQconst { 8037 break 8038 } 8039 d := v_1.AuxInt 8040 idx := v_1.Args[0] 8041 mem := v.Args[2] 8042 if !(is32Bit(c + 8*d)) { 8043 break 8044 } 8045 v.reset(OpAMD64MOVLloadidx8) 8046 v.AuxInt = c + 8*d 8047 v.Aux = sym 8048 v.AddArg(ptr) 8049 v.AddArg(idx) 8050 v.AddArg(mem) 8051 return true 8052 } 8053 return false 8054 } 8055 func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { 8056 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 8057 // cond: 8058 // result: (MOVLstore [off] {sym} ptr x mem) 8059 for { 8060 off := v.AuxInt 8061 sym := v.Aux 8062 _ = v.Args[2] 8063 ptr := v.Args[0] 8064 v_1 := v.Args[1] 8065 if v_1.Op != OpAMD64MOVLQSX { 8066 break 8067 } 8068 x := v_1.Args[0] 8069 mem := v.Args[2] 8070 v.reset(OpAMD64MOVLstore) 8071 v.AuxInt = off 8072 v.Aux = sym 8073 v.AddArg(ptr) 8074 v.AddArg(x) 8075 v.AddArg(mem) 8076 return true 8077 } 8078 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 8079 // cond: 8080 // result: (MOVLstore [off] {sym} ptr x mem) 8081 for { 8082 off := v.AuxInt 8083 sym := v.Aux 8084 _ = v.Args[2] 8085 ptr := v.Args[0] 8086 v_1 := v.Args[1] 8087 if v_1.Op != OpAMD64MOVLQZX { 8088 break 8089 } 8090 x := v_1.Args[0] 8091 mem := v.Args[2] 8092 v.reset(OpAMD64MOVLstore) 8093 v.AuxInt = off 8094 v.Aux = sym 8095 v.AddArg(ptr) 8096 v.AddArg(x) 8097 v.AddArg(mem) 8098 return true 8099 } 8100 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8101 // cond: is32Bit(off1+off2) 8102 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 8103 for { 8104 off1 := v.AuxInt 8105 sym := v.Aux 8106 _ = v.Args[2] 8107 v_0 := v.Args[0] 8108 if v_0.Op != OpAMD64ADDQconst { 8109 break 8110 } 8111 off2 := v_0.AuxInt 8112 ptr := v_0.Args[0] 8113 val := v.Args[1] 8114 mem := v.Args[2] 8115 if !(is32Bit(off1 + off2)) { 8116 break 8117 } 8118 v.reset(OpAMD64MOVLstore) 8119 v.AuxInt = off1 + off2 8120 v.Aux = sym 8121 v.AddArg(ptr) 8122 v.AddArg(val) 8123 v.AddArg(mem) 8124 return true 8125 } 8126 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 8127 // cond: validOff(off) 8128 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 8129 for { 8130 off := v.AuxInt 8131 sym := v.Aux 8132 _ = v.Args[2] 8133 ptr := v.Args[0] 8134 v_1 := v.Args[1] 8135 if v_1.Op != OpAMD64MOVLconst { 8136 break 8137 } 8138 c := v_1.AuxInt 8139 mem := v.Args[2] 8140 if !(validOff(off)) { 8141 break 8142 } 
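// makeValAndOff packs the 32-bit constant and the 32-bit displacement into
// a single AuxInt, which is why the validOff guard above must hold before
// the store can be rewritten into its *storeconst form.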
8143 v.reset(OpAMD64MOVLstoreconst) 8144 v.AuxInt = makeValAndOff(int64(int32(c)), off) 8145 v.Aux = sym 8146 v.AddArg(ptr) 8147 v.AddArg(mem) 8148 return true 8149 } 8150 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8151 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8152 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8153 for { 8154 off1 := v.AuxInt 8155 sym1 := v.Aux 8156 _ = v.Args[2] 8157 v_0 := v.Args[0] 8158 if v_0.Op != OpAMD64LEAQ { 8159 break 8160 } 8161 off2 := v_0.AuxInt 8162 sym2 := v_0.Aux 8163 base := v_0.Args[0] 8164 val := v.Args[1] 8165 mem := v.Args[2] 8166 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8167 break 8168 } 8169 v.reset(OpAMD64MOVLstore) 8170 v.AuxInt = off1 + off2 8171 v.Aux = mergeSym(sym1, sym2) 8172 v.AddArg(base) 8173 v.AddArg(val) 8174 v.AddArg(mem) 8175 return true 8176 } 8177 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8178 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8179 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8180 for { 8181 off1 := v.AuxInt 8182 sym1 := v.Aux 8183 _ = v.Args[2] 8184 v_0 := v.Args[0] 8185 if v_0.Op != OpAMD64LEAQ1 { 8186 break 8187 } 8188 off2 := v_0.AuxInt 8189 sym2 := v_0.Aux 8190 _ = v_0.Args[1] 8191 ptr := v_0.Args[0] 8192 idx := v_0.Args[1] 8193 val := v.Args[1] 8194 mem := v.Args[2] 8195 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8196 break 8197 } 8198 v.reset(OpAMD64MOVLstoreidx1) 8199 v.AuxInt = off1 + off2 8200 v.Aux = mergeSym(sym1, sym2) 8201 v.AddArg(ptr) 8202 v.AddArg(idx) 8203 v.AddArg(val) 8204 v.AddArg(mem) 8205 return true 8206 } 8207 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 8208 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8209 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8210 for { 8211 off1 := v.AuxInt 8212 sym1 := v.Aux 8213 _ = v.Args[2] 8214 v_0 := v.Args[0] 8215 if v_0.Op != OpAMD64LEAQ4 { 8216 break 8217 } 8218 off2 := v_0.AuxInt 8219 sym2 := v_0.Aux 8220 _ = v_0.Args[1] 8221 ptr := v_0.Args[0] 8222 idx := v_0.Args[1] 8223 val := v.Args[1] 8224 mem := v.Args[2] 8225 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8226 break 8227 } 8228 v.reset(OpAMD64MOVLstoreidx4) 8229 v.AuxInt = off1 + off2 8230 v.Aux = mergeSym(sym1, sym2) 8231 v.AddArg(ptr) 8232 v.AddArg(idx) 8233 v.AddArg(val) 8234 v.AddArg(mem) 8235 return true 8236 } 8237 // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 8238 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8239 // result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8240 for { 8241 off1 := v.AuxInt 8242 sym1 := v.Aux 8243 _ = v.Args[2] 8244 v_0 := v.Args[0] 8245 if v_0.Op != OpAMD64LEAQ8 { 8246 break 8247 } 8248 off2 := v_0.AuxInt 8249 sym2 := v_0.Aux 8250 _ = v_0.Args[1] 8251 ptr := v_0.Args[0] 8252 idx := v_0.Args[1] 8253 val := v.Args[1] 8254 mem := v.Args[2] 8255 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8256 break 8257 } 8258 v.reset(OpAMD64MOVLstoreidx8) 8259 v.AuxInt = off1 + off2 8260 v.Aux = mergeSym(sym1, sym2) 8261 v.AddArg(ptr) 8262 v.AddArg(idx) 8263 v.AddArg(val) 8264 v.AddArg(mem) 8265 return true 8266 } 8267 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 8268 // cond: ptr.Op != OpSB 8269 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 8270 for { 8271 off := v.AuxInt 8272 sym := v.Aux 8273 _ = v.Args[2] 8274 v_0 := v.Args[0] 8275 if v_0.Op != OpAMD64ADDQ { 
8276 break 8277 } 8278 _ = v_0.Args[1] 8279 ptr := v_0.Args[0] 8280 idx := v_0.Args[1] 8281 val := v.Args[1] 8282 mem := v.Args[2] 8283 if !(ptr.Op != OpSB) { 8284 break 8285 } 8286 v.reset(OpAMD64MOVLstoreidx1) 8287 v.AuxInt = off 8288 v.Aux = sym 8289 v.AddArg(ptr) 8290 v.AddArg(idx) 8291 v.AddArg(val) 8292 v.AddArg(mem) 8293 return true 8294 } 8295 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 8296 // cond: x.Uses == 1 && clobber(x) 8297 // result: (MOVQstore [i-4] {s} p w mem) 8298 for { 8299 i := v.AuxInt 8300 s := v.Aux 8301 _ = v.Args[2] 8302 p := v.Args[0] 8303 v_1 := v.Args[1] 8304 if v_1.Op != OpAMD64SHRQconst { 8305 break 8306 } 8307 if v_1.AuxInt != 32 { 8308 break 8309 } 8310 w := v_1.Args[0] 8311 x := v.Args[2] 8312 if x.Op != OpAMD64MOVLstore { 8313 break 8314 } 8315 if x.AuxInt != i-4 { 8316 break 8317 } 8318 if x.Aux != s { 8319 break 8320 } 8321 _ = x.Args[2] 8322 if p != x.Args[0] { 8323 break 8324 } 8325 if w != x.Args[1] { 8326 break 8327 } 8328 mem := x.Args[2] 8329 if !(x.Uses == 1 && clobber(x)) { 8330 break 8331 } 8332 v.reset(OpAMD64MOVQstore) 8333 v.AuxInt = i - 4 8334 v.Aux = s 8335 v.AddArg(p) 8336 v.AddArg(w) 8337 v.AddArg(mem) 8338 return true 8339 } 8340 return false 8341 } 8342 func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { 8343 b := v.Block 8344 _ = b 8345 typ := &b.Func.Config.Types 8346 _ = typ 8347 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 8348 // cond: x.Uses == 1 && clobber(x) 8349 // result: (MOVQstore [i-4] {s} p w0 mem) 8350 for { 8351 i := v.AuxInt 8352 s := v.Aux 8353 _ = v.Args[2] 8354 p := v.Args[0] 8355 v_1 := v.Args[1] 8356 if v_1.Op != OpAMD64SHRQconst { 8357 break 8358 } 8359 j := v_1.AuxInt 8360 w := v_1.Args[0] 8361 x := v.Args[2] 8362 if x.Op != OpAMD64MOVLstore { 8363 break 8364 } 8365 if x.AuxInt != i-4 { 8366 break 8367 } 8368 if x.Aux != s { 8369 break 8370 } 8371 _ = x.Args[2] 8372 if p != x.Args[0] { 8373 break 8374 } 8375 w0 := x.Args[1] 8376 if w0.Op != OpAMD64SHRQconst { 8377 break 8378 } 8379 if w0.AuxInt != j-32 { 8380 break 8381 } 8382 if w != w0.Args[0] { 8383 break 8384 } 8385 mem := x.Args[2] 8386 if !(x.Uses == 1 && clobber(x)) { 8387 break 8388 } 8389 v.reset(OpAMD64MOVQstore) 8390 v.AuxInt = i - 4 8391 v.Aux = s 8392 v.AddArg(p) 8393 v.AddArg(w0) 8394 v.AddArg(mem) 8395 return true 8396 } 8397 // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) 8398 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 8399 // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) 8400 for { 8401 i := v.AuxInt 8402 s := v.Aux 8403 _ = v.Args[2] 8404 p := v.Args[0] 8405 x1 := v.Args[1] 8406 if x1.Op != OpAMD64MOVLload { 8407 break 8408 } 8409 j := x1.AuxInt 8410 s2 := x1.Aux 8411 _ = x1.Args[1] 8412 p2 := x1.Args[0] 8413 mem := x1.Args[1] 8414 mem2 := v.Args[2] 8415 if mem2.Op != OpAMD64MOVLstore { 8416 break 8417 } 8418 if mem2.AuxInt != i-4 { 8419 break 8420 } 8421 if mem2.Aux != s { 8422 break 8423 } 8424 _ = mem2.Args[2] 8425 if p != mem2.Args[0] { 8426 break 8427 } 8428 x2 := mem2.Args[1] 8429 if x2.Op != OpAMD64MOVLload { 8430 break 8431 } 8432 if x2.AuxInt != j-4 { 8433 break 8434 } 8435 if x2.Aux != s2 { 8436 break 8437 } 8438 _ = x2.Args[1] 8439 if p2 != x2.Args[0] { 8440 break 8441 } 8442 if mem != x2.Args[1] { 8443 break 8444 } 8445 if mem != mem2.Args[2] { 8446 break 8447 } 8448 if 
!(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 8449 break 8450 } 8451 v.reset(OpAMD64MOVQstore) 8452 v.AuxInt = i - 4 8453 v.Aux = s 8454 v.AddArg(p) 8455 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 8456 v0.AuxInt = j - 4 8457 v0.Aux = s2 8458 v0.AddArg(p2) 8459 v0.AddArg(mem) 8460 v.AddArg(v0) 8461 v.AddArg(mem) 8462 return true 8463 } 8464 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 8465 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 8466 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8467 for { 8468 off1 := v.AuxInt 8469 sym1 := v.Aux 8470 _ = v.Args[2] 8471 v_0 := v.Args[0] 8472 if v_0.Op != OpAMD64LEAL { 8473 break 8474 } 8475 off2 := v_0.AuxInt 8476 sym2 := v_0.Aux 8477 base := v_0.Args[0] 8478 val := v.Args[1] 8479 mem := v.Args[2] 8480 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 8481 break 8482 } 8483 v.reset(OpAMD64MOVLstore) 8484 v.AuxInt = off1 + off2 8485 v.Aux = mergeSym(sym1, sym2) 8486 v.AddArg(base) 8487 v.AddArg(val) 8488 v.AddArg(mem) 8489 return true 8490 } 8491 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 8492 // cond: is32Bit(off1+off2) 8493 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 8494 for { 8495 off1 := v.AuxInt 8496 sym := v.Aux 8497 _ = v.Args[2] 8498 v_0 := v.Args[0] 8499 if v_0.Op != OpAMD64ADDLconst { 8500 break 8501 } 8502 off2 := v_0.AuxInt 8503 ptr := v_0.Args[0] 8504 val := v.Args[1] 8505 mem := v.Args[2] 8506 if !(is32Bit(off1 + off2)) { 8507 break 8508 } 8509 v.reset(OpAMD64MOVLstore) 8510 v.AuxInt = off1 + off2 8511 v.Aux = sym 8512 v.AddArg(ptr) 8513 v.AddArg(val) 8514 v.AddArg(mem) 8515 return true 8516 } 8517 // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 8518 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) 8519 // result: (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem) 8520 for { 8521 off := v.AuxInt 8522 sym := v.Aux 8523 _ = v.Args[2] 8524 ptr := v.Args[0] 8525 a := v.Args[1] 8526 if a.Op != OpAMD64ADDLconst { 8527 break 8528 } 8529 c := a.AuxInt 8530 l := a.Args[0] 8531 if l.Op != OpAMD64MOVLload { 8532 break 8533 } 8534 if l.AuxInt != off { 8535 break 8536 } 8537 if l.Aux != sym { 8538 break 8539 } 8540 _ = l.Args[1] 8541 ptr2 := l.Args[0] 8542 mem := l.Args[1] 8543 if mem != v.Args[2] { 8544 break 8545 } 8546 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { 8547 break 8548 } 8549 v.reset(OpAMD64ADDLconstmem) 8550 v.AuxInt = makeValAndOff(c, off) 8551 v.Aux = sym 8552 v.AddArg(ptr) 8553 v.AddArg(mem) 8554 return true 8555 } 8556 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) 8557 // cond: 8558 // result: (MOVSSstore [off] {sym} ptr val mem) 8559 for { 8560 off := v.AuxInt 8561 sym := v.Aux 8562 _ = v.Args[2] 8563 ptr := v.Args[0] 8564 v_1 := v.Args[1] 8565 if v_1.Op != OpAMD64MOVLf2i { 8566 break 8567 } 8568 val := v_1.Args[0] 8569 mem := v.Args[2] 8570 v.reset(OpAMD64MOVSSstore) 8571 v.AuxInt = off 8572 v.Aux = sym 8573 v.AddArg(ptr) 8574 v.AddArg(val) 8575 v.AddArg(mem) 8576 return true 8577 } 8578 return false 8579 } 8580 func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { 8581 b := v.Block 8582 _ = b 8583 typ := &b.Func.Config.Types 8584 _ = typ 8585 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 8586 // cond: ValAndOff(sc).canAdd(off) 8587 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8588 for { 8589 sc := 
v.AuxInt 8590 s := v.Aux 8591 _ = v.Args[1] 8592 v_0 := v.Args[0] 8593 if v_0.Op != OpAMD64ADDQconst { 8594 break 8595 } 8596 off := v_0.AuxInt 8597 ptr := v_0.Args[0] 8598 mem := v.Args[1] 8599 if !(ValAndOff(sc).canAdd(off)) { 8600 break 8601 } 8602 v.reset(OpAMD64MOVLstoreconst) 8603 v.AuxInt = ValAndOff(sc).add(off) 8604 v.Aux = s 8605 v.AddArg(ptr) 8606 v.AddArg(mem) 8607 return true 8608 } 8609 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 8610 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8611 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8612 for { 8613 sc := v.AuxInt 8614 sym1 := v.Aux 8615 _ = v.Args[1] 8616 v_0 := v.Args[0] 8617 if v_0.Op != OpAMD64LEAQ { 8618 break 8619 } 8620 off := v_0.AuxInt 8621 sym2 := v_0.Aux 8622 ptr := v_0.Args[0] 8623 mem := v.Args[1] 8624 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8625 break 8626 } 8627 v.reset(OpAMD64MOVLstoreconst) 8628 v.AuxInt = ValAndOff(sc).add(off) 8629 v.Aux = mergeSym(sym1, sym2) 8630 v.AddArg(ptr) 8631 v.AddArg(mem) 8632 return true 8633 } 8634 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 8635 // cond: canMergeSym(sym1, sym2) 8636 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8637 for { 8638 x := v.AuxInt 8639 sym1 := v.Aux 8640 _ = v.Args[1] 8641 v_0 := v.Args[0] 8642 if v_0.Op != OpAMD64LEAQ1 { 8643 break 8644 } 8645 off := v_0.AuxInt 8646 sym2 := v_0.Aux 8647 _ = v_0.Args[1] 8648 ptr := v_0.Args[0] 8649 idx := v_0.Args[1] 8650 mem := v.Args[1] 8651 if !(canMergeSym(sym1, sym2)) { 8652 break 8653 } 8654 v.reset(OpAMD64MOVLstoreconstidx1) 8655 v.AuxInt = ValAndOff(x).add(off) 8656 v.Aux = mergeSym(sym1, sym2) 8657 v.AddArg(ptr) 8658 v.AddArg(idx) 8659 v.AddArg(mem) 8660 return true 8661 } 8662 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 8663 // cond: canMergeSym(sym1, sym2) 8664 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8665 for { 8666 x := v.AuxInt 8667 sym1 := v.Aux 8668 _ = v.Args[1] 8669 v_0 := v.Args[0] 8670 if v_0.Op != OpAMD64LEAQ4 { 8671 break 8672 } 8673 off := v_0.AuxInt 8674 sym2 := v_0.Aux 8675 _ = v_0.Args[1] 8676 ptr := v_0.Args[0] 8677 idx := v_0.Args[1] 8678 mem := v.Args[1] 8679 if !(canMergeSym(sym1, sym2)) { 8680 break 8681 } 8682 v.reset(OpAMD64MOVLstoreconstidx4) 8683 v.AuxInt = ValAndOff(x).add(off) 8684 v.Aux = mergeSym(sym1, sym2) 8685 v.AddArg(ptr) 8686 v.AddArg(idx) 8687 v.AddArg(mem) 8688 return true 8689 } 8690 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 8691 // cond: 8692 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 8693 for { 8694 x := v.AuxInt 8695 sym := v.Aux 8696 _ = v.Args[1] 8697 v_0 := v.Args[0] 8698 if v_0.Op != OpAMD64ADDQ { 8699 break 8700 } 8701 _ = v_0.Args[1] 8702 ptr := v_0.Args[0] 8703 idx := v_0.Args[1] 8704 mem := v.Args[1] 8705 v.reset(OpAMD64MOVLstoreconstidx1) 8706 v.AuxInt = x 8707 v.Aux = sym 8708 v.AddArg(ptr) 8709 v.AddArg(idx) 8710 v.AddArg(mem) 8711 return true 8712 } 8713 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 8714 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 8715 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 8716 for { 8717 c := v.AuxInt 8718 s := v.Aux 8719 _ = v.Args[1] 8720 p := v.Args[0] 8721 x := v.Args[1] 8722 if x.Op != OpAMD64MOVLstoreconst { 8723 break 8724 } 
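// Fusion rule: two adjacent 4-byte constant stores (offsets a and a+4) are
// combined into one 8-byte store of a packed constant, with the low half
// taken from the earlier store and the high half from this one.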
8725 a := x.AuxInt 8726 if x.Aux != s { 8727 break 8728 } 8729 _ = x.Args[1] 8730 if p != x.Args[0] { 8731 break 8732 } 8733 mem := x.Args[1] 8734 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 8735 break 8736 } 8737 v.reset(OpAMD64MOVQstore) 8738 v.AuxInt = ValAndOff(a).Off() 8739 v.Aux = s 8740 v.AddArg(p) 8741 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 8742 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 8743 v.AddArg(v0) 8744 v.AddArg(mem) 8745 return true 8746 } 8747 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 8748 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8749 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8750 for { 8751 sc := v.AuxInt 8752 sym1 := v.Aux 8753 _ = v.Args[1] 8754 v_0 := v.Args[0] 8755 if v_0.Op != OpAMD64LEAL { 8756 break 8757 } 8758 off := v_0.AuxInt 8759 sym2 := v_0.Aux 8760 ptr := v_0.Args[0] 8761 mem := v.Args[1] 8762 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8763 break 8764 } 8765 v.reset(OpAMD64MOVLstoreconst) 8766 v.AuxInt = ValAndOff(sc).add(off) 8767 v.Aux = mergeSym(sym1, sym2) 8768 v.AddArg(ptr) 8769 v.AddArg(mem) 8770 return true 8771 } 8772 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 8773 // cond: ValAndOff(sc).canAdd(off) 8774 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8775 for { 8776 sc := v.AuxInt 8777 s := v.Aux 8778 _ = v.Args[1] 8779 v_0 := v.Args[0] 8780 if v_0.Op != OpAMD64ADDLconst { 8781 break 8782 } 8783 off := v_0.AuxInt 8784 ptr := v_0.Args[0] 8785 mem := v.Args[1] 8786 if !(ValAndOff(sc).canAdd(off)) { 8787 break 8788 } 8789 v.reset(OpAMD64MOVLstoreconst) 8790 v.AuxInt = ValAndOff(sc).add(off) 8791 v.Aux = s 8792 v.AddArg(ptr) 8793 v.AddArg(mem) 8794 return true 8795 } 8796 return false 8797 } 8798 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { 8799 b := v.Block 8800 _ = b 8801 typ := &b.Func.Config.Types 8802 _ = typ 8803 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 8804 // cond: 8805 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 8806 for { 8807 c := v.AuxInt 8808 sym := v.Aux 8809 _ = v.Args[2] 8810 ptr := v.Args[0] 8811 v_1 := v.Args[1] 8812 if v_1.Op != OpAMD64SHLQconst { 8813 break 8814 } 8815 if v_1.AuxInt != 2 { 8816 break 8817 } 8818 idx := v_1.Args[0] 8819 mem := v.Args[2] 8820 v.reset(OpAMD64MOVLstoreconstidx4) 8821 v.AuxInt = c 8822 v.Aux = sym 8823 v.AddArg(ptr) 8824 v.AddArg(idx) 8825 v.AddArg(mem) 8826 return true 8827 } 8828 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 8829 // cond: ValAndOff(x).canAdd(c) 8830 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8831 for { 8832 x := v.AuxInt 8833 sym := v.Aux 8834 _ = v.Args[2] 8835 v_0 := v.Args[0] 8836 if v_0.Op != OpAMD64ADDQconst { 8837 break 8838 } 8839 c := v_0.AuxInt 8840 ptr := v_0.Args[0] 8841 idx := v.Args[1] 8842 mem := v.Args[2] 8843 if !(ValAndOff(x).canAdd(c)) { 8844 break 8845 } 8846 v.reset(OpAMD64MOVLstoreconstidx1) 8847 v.AuxInt = ValAndOff(x).add(c) 8848 v.Aux = sym 8849 v.AddArg(ptr) 8850 v.AddArg(idx) 8851 v.AddArg(mem) 8852 return true 8853 } 8854 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 8855 // cond: ValAndOff(x).canAdd(c) 8856 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8857 for { 8858 x := v.AuxInt 8859 sym := v.Aux 8860 _ = v.Args[2] 8861 ptr := v.Args[0] 8862 v_1 := v.Args[1] 8863 if v_1.Op != 
OpAMD64ADDQconst { 8864 break 8865 } 8866 c := v_1.AuxInt 8867 idx := v_1.Args[0] 8868 mem := v.Args[2] 8869 if !(ValAndOff(x).canAdd(c)) { 8870 break 8871 } 8872 v.reset(OpAMD64MOVLstoreconstidx1) 8873 v.AuxInt = ValAndOff(x).add(c) 8874 v.Aux = sym 8875 v.AddArg(ptr) 8876 v.AddArg(idx) 8877 v.AddArg(mem) 8878 return true 8879 } 8880 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 8881 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 8882 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 8883 for { 8884 c := v.AuxInt 8885 s := v.Aux 8886 _ = v.Args[2] 8887 p := v.Args[0] 8888 i := v.Args[1] 8889 x := v.Args[2] 8890 if x.Op != OpAMD64MOVLstoreconstidx1 { 8891 break 8892 } 8893 a := x.AuxInt 8894 if x.Aux != s { 8895 break 8896 } 8897 _ = x.Args[2] 8898 if p != x.Args[0] { 8899 break 8900 } 8901 if i != x.Args[1] { 8902 break 8903 } 8904 mem := x.Args[2] 8905 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 8906 break 8907 } 8908 v.reset(OpAMD64MOVQstoreidx1) 8909 v.AuxInt = ValAndOff(a).Off() 8910 v.Aux = s 8911 v.AddArg(p) 8912 v.AddArg(i) 8913 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 8914 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 8915 v.AddArg(v0) 8916 v.AddArg(mem) 8917 return true 8918 } 8919 return false 8920 } 8921 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { 8922 b := v.Block 8923 _ = b 8924 typ := &b.Func.Config.Types 8925 _ = typ 8926 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 8927 // cond: ValAndOff(x).canAdd(c) 8928 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8929 for { 8930 x := v.AuxInt 8931 sym := v.Aux 8932 _ = v.Args[2] 8933 v_0 := v.Args[0] 8934 if v_0.Op != OpAMD64ADDQconst { 8935 break 8936 } 8937 c := v_0.AuxInt 8938 ptr := v_0.Args[0] 8939 idx := v.Args[1] 8940 mem := v.Args[2] 8941 if !(ValAndOff(x).canAdd(c)) { 8942 break 8943 } 8944 v.reset(OpAMD64MOVLstoreconstidx4) 8945 v.AuxInt = ValAndOff(x).add(c) 8946 v.Aux = sym 8947 v.AddArg(ptr) 8948 v.AddArg(idx) 8949 v.AddArg(mem) 8950 return true 8951 } 8952 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 8953 // cond: ValAndOff(x).canAdd(4*c) 8954 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 8955 for { 8956 x := v.AuxInt 8957 sym := v.Aux 8958 _ = v.Args[2] 8959 ptr := v.Args[0] 8960 v_1 := v.Args[1] 8961 if v_1.Op != OpAMD64ADDQconst { 8962 break 8963 } 8964 c := v_1.AuxInt 8965 idx := v_1.Args[0] 8966 mem := v.Args[2] 8967 if !(ValAndOff(x).canAdd(4 * c)) { 8968 break 8969 } 8970 v.reset(OpAMD64MOVLstoreconstidx4) 8971 v.AuxInt = ValAndOff(x).add(4 * c) 8972 v.Aux = sym 8973 v.AddArg(ptr) 8974 v.AddArg(idx) 8975 v.AddArg(mem) 8976 return true 8977 } 8978 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 8979 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 8980 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 8981 for { 8982 c := v.AuxInt 8983 s := v.Aux 8984 _ = v.Args[2] 8985 p := v.Args[0] 8986 i := v.Args[1] 8987 x := v.Args[2] 8988 if x.Op != OpAMD64MOVLstoreconstidx4 { 8989 break 8990 } 8991 a := x.AuxInt 8992 if x.Aux != s { 8993 break 8994 } 8995 _ = x.Args[2] 8996 if p != x.Args[0] { 8997 break 8998 } 
8999 if i != x.Args[1] { 9000 break 9001 } 9002 mem := x.Args[2] 9003 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 9004 break 9005 } 9006 v.reset(OpAMD64MOVQstoreidx1) 9007 v.AuxInt = ValAndOff(a).Off() 9008 v.Aux = s 9009 v.AddArg(p) 9010 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 9011 v0.AuxInt = 2 9012 v0.AddArg(i) 9013 v.AddArg(v0) 9014 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 9015 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 9016 v.AddArg(v1) 9017 v.AddArg(mem) 9018 return true 9019 } 9020 return false 9021 } 9022 func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { 9023 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 9024 // cond: 9025 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 9026 for { 9027 c := v.AuxInt 9028 sym := v.Aux 9029 _ = v.Args[3] 9030 ptr := v.Args[0] 9031 v_1 := v.Args[1] 9032 if v_1.Op != OpAMD64SHLQconst { 9033 break 9034 } 9035 if v_1.AuxInt != 2 { 9036 break 9037 } 9038 idx := v_1.Args[0] 9039 val := v.Args[2] 9040 mem := v.Args[3] 9041 v.reset(OpAMD64MOVLstoreidx4) 9042 v.AuxInt = c 9043 v.Aux = sym 9044 v.AddArg(ptr) 9045 v.AddArg(idx) 9046 v.AddArg(val) 9047 v.AddArg(mem) 9048 return true 9049 } 9050 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 9051 // cond: 9052 // result: (MOVLstoreidx8 [c] {sym} ptr idx val mem) 9053 for { 9054 c := v.AuxInt 9055 sym := v.Aux 9056 _ = v.Args[3] 9057 ptr := v.Args[0] 9058 v_1 := v.Args[1] 9059 if v_1.Op != OpAMD64SHLQconst { 9060 break 9061 } 9062 if v_1.AuxInt != 3 { 9063 break 9064 } 9065 idx := v_1.Args[0] 9066 val := v.Args[2] 9067 mem := v.Args[3] 9068 v.reset(OpAMD64MOVLstoreidx8) 9069 v.AuxInt = c 9070 v.Aux = sym 9071 v.AddArg(ptr) 9072 v.AddArg(idx) 9073 v.AddArg(val) 9074 v.AddArg(mem) 9075 return true 9076 } 9077 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9078 // cond: is32Bit(c+d) 9079 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 9080 for { 9081 c := v.AuxInt 9082 sym := v.Aux 9083 _ = v.Args[3] 9084 v_0 := v.Args[0] 9085 if v_0.Op != OpAMD64ADDQconst { 9086 break 9087 } 9088 d := v_0.AuxInt 9089 ptr := v_0.Args[0] 9090 idx := v.Args[1] 9091 val := v.Args[2] 9092 mem := v.Args[3] 9093 if !(is32Bit(c + d)) { 9094 break 9095 } 9096 v.reset(OpAMD64MOVLstoreidx1) 9097 v.AuxInt = c + d 9098 v.Aux = sym 9099 v.AddArg(ptr) 9100 v.AddArg(idx) 9101 v.AddArg(val) 9102 v.AddArg(mem) 9103 return true 9104 } 9105 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9106 // cond: is32Bit(c+d) 9107 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 9108 for { 9109 c := v.AuxInt 9110 sym := v.Aux 9111 _ = v.Args[3] 9112 ptr := v.Args[0] 9113 v_1 := v.Args[1] 9114 if v_1.Op != OpAMD64ADDQconst { 9115 break 9116 } 9117 d := v_1.AuxInt 9118 idx := v_1.Args[0] 9119 val := v.Args[2] 9120 mem := v.Args[3] 9121 if !(is32Bit(c + d)) { 9122 break 9123 } 9124 v.reset(OpAMD64MOVLstoreidx1) 9125 v.AuxInt = c + d 9126 v.Aux = sym 9127 v.AddArg(ptr) 9128 v.AddArg(idx) 9129 v.AddArg(val) 9130 v.AddArg(mem) 9131 return true 9132 } 9133 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 9134 // cond: x.Uses == 1 && clobber(x) 9135 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 9136 for { 9137 i := v.AuxInt 9138 s := v.Aux 9139 _ = v.Args[3] 9140 p := v.Args[0] 9141 idx := v.Args[1] 9142 v_2 := v.Args[2] 9143 if v_2.Op != OpAMD64SHRQconst { 9144 break 9145 } 9146 if v_2.AuxInt != 32 { 9147 break 9148 } 9149 
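// Pairing rule: this store writes the high 32 bits of w (SHRQconst [32] w)
// directly above a store of w's low 32 bits, so the two collapse into a
// single MOVQstoreidx1; x.Uses == 1 && clobber(x) retires the old store.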
w := v_2.Args[0] 9150 x := v.Args[3] 9151 if x.Op != OpAMD64MOVLstoreidx1 { 9152 break 9153 } 9154 if x.AuxInt != i-4 { 9155 break 9156 } 9157 if x.Aux != s { 9158 break 9159 } 9160 _ = x.Args[3] 9161 if p != x.Args[0] { 9162 break 9163 } 9164 if idx != x.Args[1] { 9165 break 9166 } 9167 if w != x.Args[2] { 9168 break 9169 } 9170 mem := x.Args[3] 9171 if !(x.Uses == 1 && clobber(x)) { 9172 break 9173 } 9174 v.reset(OpAMD64MOVQstoreidx1) 9175 v.AuxInt = i - 4 9176 v.Aux = s 9177 v.AddArg(p) 9178 v.AddArg(idx) 9179 v.AddArg(w) 9180 v.AddArg(mem) 9181 return true 9182 } 9183 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 9184 // cond: x.Uses == 1 && clobber(x) 9185 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 9186 for { 9187 i := v.AuxInt 9188 s := v.Aux 9189 _ = v.Args[3] 9190 p := v.Args[0] 9191 idx := v.Args[1] 9192 v_2 := v.Args[2] 9193 if v_2.Op != OpAMD64SHRQconst { 9194 break 9195 } 9196 j := v_2.AuxInt 9197 w := v_2.Args[0] 9198 x := v.Args[3] 9199 if x.Op != OpAMD64MOVLstoreidx1 { 9200 break 9201 } 9202 if x.AuxInt != i-4 { 9203 break 9204 } 9205 if x.Aux != s { 9206 break 9207 } 9208 _ = x.Args[3] 9209 if p != x.Args[0] { 9210 break 9211 } 9212 if idx != x.Args[1] { 9213 break 9214 } 9215 w0 := x.Args[2] 9216 if w0.Op != OpAMD64SHRQconst { 9217 break 9218 } 9219 if w0.AuxInt != j-32 { 9220 break 9221 } 9222 if w != w0.Args[0] { 9223 break 9224 } 9225 mem := x.Args[3] 9226 if !(x.Uses == 1 && clobber(x)) { 9227 break 9228 } 9229 v.reset(OpAMD64MOVQstoreidx1) 9230 v.AuxInt = i - 4 9231 v.Aux = s 9232 v.AddArg(p) 9233 v.AddArg(idx) 9234 v.AddArg(w0) 9235 v.AddArg(mem) 9236 return true 9237 } 9238 return false 9239 } 9240 func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { 9241 b := v.Block 9242 _ = b 9243 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9244 // cond: is32Bit(c+d) 9245 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 9246 for { 9247 c := v.AuxInt 9248 sym := v.Aux 9249 _ = v.Args[3] 9250 v_0 := v.Args[0] 9251 if v_0.Op != OpAMD64ADDQconst { 9252 break 9253 } 9254 d := v_0.AuxInt 9255 ptr := v_0.Args[0] 9256 idx := v.Args[1] 9257 val := v.Args[2] 9258 mem := v.Args[3] 9259 if !(is32Bit(c + d)) { 9260 break 9261 } 9262 v.reset(OpAMD64MOVLstoreidx4) 9263 v.AuxInt = c + d 9264 v.Aux = sym 9265 v.AddArg(ptr) 9266 v.AddArg(idx) 9267 v.AddArg(val) 9268 v.AddArg(mem) 9269 return true 9270 } 9271 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9272 // cond: is32Bit(c+4*d) 9273 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 9274 for { 9275 c := v.AuxInt 9276 sym := v.Aux 9277 _ = v.Args[3] 9278 ptr := v.Args[0] 9279 v_1 := v.Args[1] 9280 if v_1.Op != OpAMD64ADDQconst { 9281 break 9282 } 9283 d := v_1.AuxInt 9284 idx := v_1.Args[0] 9285 val := v.Args[2] 9286 mem := v.Args[3] 9287 if !(is32Bit(c + 4*d)) { 9288 break 9289 } 9290 v.reset(OpAMD64MOVLstoreidx4) 9291 v.AuxInt = c + 4*d 9292 v.Aux = sym 9293 v.AddArg(ptr) 9294 v.AddArg(idx) 9295 v.AddArg(val) 9296 v.AddArg(mem) 9297 return true 9298 } 9299 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 9300 // cond: x.Uses == 1 && clobber(x) 9301 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 9302 for { 9303 i := v.AuxInt 9304 s := v.Aux 9305 _ = v.Args[3] 9306 p := v.Args[0] 9307 idx := v.Args[1] 9308 v_2 := v.Args[2] 9309 if v_2.Op != OpAMD64SHRQconst { 9310 break 9311 } 9312 if v_2.AuxInt != 32 { 9313 break 9314 } 
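// The same high/low pairing for the x4-scaled form. The 8-byte replacement
// has no x4 variant, so the result re-materializes the index as idx<<2 and
// falls back to the unscaled MOVQstoreidx1.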
9315 w := v_2.Args[0] 9316 x := v.Args[3] 9317 if x.Op != OpAMD64MOVLstoreidx4 { 9318 break 9319 } 9320 if x.AuxInt != i-4 { 9321 break 9322 } 9323 if x.Aux != s { 9324 break 9325 } 9326 _ = x.Args[3] 9327 if p != x.Args[0] { 9328 break 9329 } 9330 if idx != x.Args[1] { 9331 break 9332 } 9333 if w != x.Args[2] { 9334 break 9335 } 9336 mem := x.Args[3] 9337 if !(x.Uses == 1 && clobber(x)) { 9338 break 9339 } 9340 v.reset(OpAMD64MOVQstoreidx1) 9341 v.AuxInt = i - 4 9342 v.Aux = s 9343 v.AddArg(p) 9344 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 9345 v0.AuxInt = 2 9346 v0.AddArg(idx) 9347 v.AddArg(v0) 9348 v.AddArg(w) 9349 v.AddArg(mem) 9350 return true 9351 } 9352 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 9353 // cond: x.Uses == 1 && clobber(x) 9354 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 9355 for { 9356 i := v.AuxInt 9357 s := v.Aux 9358 _ = v.Args[3] 9359 p := v.Args[0] 9360 idx := v.Args[1] 9361 v_2 := v.Args[2] 9362 if v_2.Op != OpAMD64SHRQconst { 9363 break 9364 } 9365 j := v_2.AuxInt 9366 w := v_2.Args[0] 9367 x := v.Args[3] 9368 if x.Op != OpAMD64MOVLstoreidx4 { 9369 break 9370 } 9371 if x.AuxInt != i-4 { 9372 break 9373 } 9374 if x.Aux != s { 9375 break 9376 } 9377 _ = x.Args[3] 9378 if p != x.Args[0] { 9379 break 9380 } 9381 if idx != x.Args[1] { 9382 break 9383 } 9384 w0 := x.Args[2] 9385 if w0.Op != OpAMD64SHRQconst { 9386 break 9387 } 9388 if w0.AuxInt != j-32 { 9389 break 9390 } 9391 if w != w0.Args[0] { 9392 break 9393 } 9394 mem := x.Args[3] 9395 if !(x.Uses == 1 && clobber(x)) { 9396 break 9397 } 9398 v.reset(OpAMD64MOVQstoreidx1) 9399 v.AuxInt = i - 4 9400 v.Aux = s 9401 v.AddArg(p) 9402 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 9403 v0.AuxInt = 2 9404 v0.AddArg(idx) 9405 v.AddArg(v0) 9406 v.AddArg(w0) 9407 v.AddArg(mem) 9408 return true 9409 } 9410 return false 9411 } 9412 func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { 9413 // match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9414 // cond: is32Bit(c+d) 9415 // result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem) 9416 for { 9417 c := v.AuxInt 9418 sym := v.Aux 9419 _ = v.Args[3] 9420 v_0 := v.Args[0] 9421 if v_0.Op != OpAMD64ADDQconst { 9422 break 9423 } 9424 d := v_0.AuxInt 9425 ptr := v_0.Args[0] 9426 idx := v.Args[1] 9427 val := v.Args[2] 9428 mem := v.Args[3] 9429 if !(is32Bit(c + d)) { 9430 break 9431 } 9432 v.reset(OpAMD64MOVLstoreidx8) 9433 v.AuxInt = c + d 9434 v.Aux = sym 9435 v.AddArg(ptr) 9436 v.AddArg(idx) 9437 v.AddArg(val) 9438 v.AddArg(mem) 9439 return true 9440 } 9441 // match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9442 // cond: is32Bit(c+8*d) 9443 // result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem) 9444 for { 9445 c := v.AuxInt 9446 sym := v.Aux 9447 _ = v.Args[3] 9448 ptr := v.Args[0] 9449 v_1 := v.Args[1] 9450 if v_1.Op != OpAMD64ADDQconst { 9451 break 9452 } 9453 d := v_1.AuxInt 9454 idx := v_1.Args[0] 9455 val := v.Args[2] 9456 mem := v.Args[3] 9457 if !(is32Bit(c + 8*d)) { 9458 break 9459 } 9460 v.reset(OpAMD64MOVLstoreidx8) 9461 v.AuxInt = c + 8*d 9462 v.Aux = sym 9463 v.AddArg(ptr) 9464 v.AddArg(idx) 9465 v.AddArg(val) 9466 v.AddArg(mem) 9467 return true 9468 } 9469 return false 9470 } 9471 func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { 9472 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 9473 // cond: is32Bit(off1+off2) 9474 // result: (MOVOload [off1+off2] {sym} ptr mem) 9475 for { 9476 
off1 := v.AuxInt 9477 sym := v.Aux 9478 _ = v.Args[1] 9479 v_0 := v.Args[0] 9480 if v_0.Op != OpAMD64ADDQconst { 9481 break 9482 } 9483 off2 := v_0.AuxInt 9484 ptr := v_0.Args[0] 9485 mem := v.Args[1] 9486 if !(is32Bit(off1 + off2)) { 9487 break 9488 } 9489 v.reset(OpAMD64MOVOload) 9490 v.AuxInt = off1 + off2 9491 v.Aux = sym 9492 v.AddArg(ptr) 9493 v.AddArg(mem) 9494 return true 9495 } 9496 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9497 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9498 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9499 for { 9500 off1 := v.AuxInt 9501 sym1 := v.Aux 9502 _ = v.Args[1] 9503 v_0 := v.Args[0] 9504 if v_0.Op != OpAMD64LEAQ { 9505 break 9506 } 9507 off2 := v_0.AuxInt 9508 sym2 := v_0.Aux 9509 base := v_0.Args[0] 9510 mem := v.Args[1] 9511 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9512 break 9513 } 9514 v.reset(OpAMD64MOVOload) 9515 v.AuxInt = off1 + off2 9516 v.Aux = mergeSym(sym1, sym2) 9517 v.AddArg(base) 9518 v.AddArg(mem) 9519 return true 9520 } 9521 return false 9522 } 9523 func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { 9524 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9525 // cond: is32Bit(off1+off2) 9526 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 9527 for { 9528 off1 := v.AuxInt 9529 sym := v.Aux 9530 _ = v.Args[2] 9531 v_0 := v.Args[0] 9532 if v_0.Op != OpAMD64ADDQconst { 9533 break 9534 } 9535 off2 := v_0.AuxInt 9536 ptr := v_0.Args[0] 9537 val := v.Args[1] 9538 mem := v.Args[2] 9539 if !(is32Bit(off1 + off2)) { 9540 break 9541 } 9542 v.reset(OpAMD64MOVOstore) 9543 v.AuxInt = off1 + off2 9544 v.Aux = sym 9545 v.AddArg(ptr) 9546 v.AddArg(val) 9547 v.AddArg(mem) 9548 return true 9549 } 9550 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9551 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9552 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9553 for { 9554 off1 := v.AuxInt 9555 sym1 := v.Aux 9556 _ = v.Args[2] 9557 v_0 := v.Args[0] 9558 if v_0.Op != OpAMD64LEAQ { 9559 break 9560 } 9561 off2 := v_0.AuxInt 9562 sym2 := v_0.Aux 9563 base := v_0.Args[0] 9564 val := v.Args[1] 9565 mem := v.Args[2] 9566 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9567 break 9568 } 9569 v.reset(OpAMD64MOVOstore) 9570 v.AuxInt = off1 + off2 9571 v.Aux = mergeSym(sym1, sym2) 9572 v.AddArg(base) 9573 v.AddArg(val) 9574 v.AddArg(mem) 9575 return true 9576 } 9577 return false 9578 } 9579 func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { 9580 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 9581 // cond: is32Bit(off1+off2) 9582 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 9583 for { 9584 off1 := v.AuxInt 9585 sym := v.Aux 9586 _ = v.Args[1] 9587 v_0 := v.Args[0] 9588 if v_0.Op != OpAMD64ADDQconst { 9589 break 9590 } 9591 off2 := v_0.AuxInt 9592 ptr := v_0.Args[0] 9593 mem := v.Args[1] 9594 if !(is32Bit(off1 + off2)) { 9595 break 9596 } 9597 v.reset(OpAMD64MOVQatomicload) 9598 v.AuxInt = off1 + off2 9599 v.Aux = sym 9600 v.AddArg(ptr) 9601 v.AddArg(mem) 9602 return true 9603 } 9604 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 9605 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9606 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 9607 for { 9608 off1 := v.AuxInt 9609 sym1 := v.Aux 9610 _ = v.Args[1] 9611 v_0 := v.Args[0] 9612 if v_0.Op != OpAMD64LEAQ { 9613 break 9614 } 9615 off2 := v_0.AuxInt 9616 sym2 := v_0.Aux 9617 ptr := 
v_0.Args[0] 9618 mem := v.Args[1] 9619 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9620 break 9621 } 9622 v.reset(OpAMD64MOVQatomicload) 9623 v.AuxInt = off1 + off2 9624 v.Aux = mergeSym(sym1, sym2) 9625 v.AddArg(ptr) 9626 v.AddArg(mem) 9627 return true 9628 } 9629 return false 9630 } 9631 func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool { 9632 b := v.Block 9633 _ = b 9634 // match: (MOVQf2i <t> (Arg [off] {sym})) 9635 // cond: 9636 // result: @b.Func.Entry (Arg <t> [off] {sym}) 9637 for { 9638 t := v.Type 9639 v_0 := v.Args[0] 9640 if v_0.Op != OpArg { 9641 break 9642 } 9643 off := v_0.AuxInt 9644 sym := v_0.Aux 9645 b = b.Func.Entry 9646 v0 := b.NewValue0(v.Pos, OpArg, t) 9647 v.reset(OpCopy) 9648 v.AddArg(v0) 9649 v0.AuxInt = off 9650 v0.Aux = sym 9651 return true 9652 } 9653 return false 9654 } 9655 func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { 9656 b := v.Block 9657 _ = b 9658 // match: (MOVQi2f <t> (Arg [off] {sym})) 9659 // cond: 9660 // result: @b.Func.Entry (Arg <t> [off] {sym}) 9661 for { 9662 t := v.Type 9663 v_0 := v.Args[0] 9664 if v_0.Op != OpArg { 9665 break 9666 } 9667 off := v_0.AuxInt 9668 sym := v_0.Aux 9669 b = b.Func.Entry 9670 v0 := b.NewValue0(v.Pos, OpArg, t) 9671 v.reset(OpCopy) 9672 v.AddArg(v0) 9673 v0.AuxInt = off 9674 v0.Aux = sym 9675 return true 9676 } 9677 return false 9678 } 9679 func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { 9680 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 9681 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 9682 // result: x 9683 for { 9684 off := v.AuxInt 9685 sym := v.Aux 9686 _ = v.Args[1] 9687 ptr := v.Args[0] 9688 v_1 := v.Args[1] 9689 if v_1.Op != OpAMD64MOVQstore { 9690 break 9691 } 9692 off2 := v_1.AuxInt 9693 sym2 := v_1.Aux 9694 _ = v_1.Args[2] 9695 ptr2 := v_1.Args[0] 9696 x := v_1.Args[1] 9697 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 9698 break 9699 } 9700 v.reset(OpCopy) 9701 v.Type = x.Type 9702 v.AddArg(x) 9703 return true 9704 } 9705 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 9706 // cond: is32Bit(off1+off2) 9707 // result: (MOVQload [off1+off2] {sym} ptr mem) 9708 for { 9709 off1 := v.AuxInt 9710 sym := v.Aux 9711 _ = v.Args[1] 9712 v_0 := v.Args[0] 9713 if v_0.Op != OpAMD64ADDQconst { 9714 break 9715 } 9716 off2 := v_0.AuxInt 9717 ptr := v_0.Args[0] 9718 mem := v.Args[1] 9719 if !(is32Bit(off1 + off2)) { 9720 break 9721 } 9722 v.reset(OpAMD64MOVQload) 9723 v.AuxInt = off1 + off2 9724 v.Aux = sym 9725 v.AddArg(ptr) 9726 v.AddArg(mem) 9727 return true 9728 } 9729 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9730 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9731 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9732 for { 9733 off1 := v.AuxInt 9734 sym1 := v.Aux 9735 _ = v.Args[1] 9736 v_0 := v.Args[0] 9737 if v_0.Op != OpAMD64LEAQ { 9738 break 9739 } 9740 off2 := v_0.AuxInt 9741 sym2 := v_0.Aux 9742 base := v_0.Args[0] 9743 mem := v.Args[1] 9744 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9745 break 9746 } 9747 v.reset(OpAMD64MOVQload) 9748 v.AuxInt = off1 + off2 9749 v.Aux = mergeSym(sym1, sym2) 9750 v.AddArg(base) 9751 v.AddArg(mem) 9752 return true 9753 } 9754 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9755 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9756 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9757 for { 9758 off1 := v.AuxInt 9759 sym1 := v.Aux 9760 _ = v.Args[1] 9761 v_0 := v.Args[0] 
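// MOVQload follows the same folding ladder as MOVLload above, with one
// difference: an 8-byte load of a just-stored value is forwarded as a plain
// OpCopy (first rule of this function) instead of through a zero-extension.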
9762 if v_0.Op != OpAMD64LEAQ1 { 9763 break 9764 } 9765 off2 := v_0.AuxInt 9766 sym2 := v_0.Aux 9767 _ = v_0.Args[1] 9768 ptr := v_0.Args[0] 9769 idx := v_0.Args[1] 9770 mem := v.Args[1] 9771 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9772 break 9773 } 9774 v.reset(OpAMD64MOVQloadidx1) 9775 v.AuxInt = off1 + off2 9776 v.Aux = mergeSym(sym1, sym2) 9777 v.AddArg(ptr) 9778 v.AddArg(idx) 9779 v.AddArg(mem) 9780 return true 9781 } 9782 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 9783 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9784 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9785 for { 9786 off1 := v.AuxInt 9787 sym1 := v.Aux 9788 _ = v.Args[1] 9789 v_0 := v.Args[0] 9790 if v_0.Op != OpAMD64LEAQ8 { 9791 break 9792 } 9793 off2 := v_0.AuxInt 9794 sym2 := v_0.Aux 9795 _ = v_0.Args[1] 9796 ptr := v_0.Args[0] 9797 idx := v_0.Args[1] 9798 mem := v.Args[1] 9799 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9800 break 9801 } 9802 v.reset(OpAMD64MOVQloadidx8) 9803 v.AuxInt = off1 + off2 9804 v.Aux = mergeSym(sym1, sym2) 9805 v.AddArg(ptr) 9806 v.AddArg(idx) 9807 v.AddArg(mem) 9808 return true 9809 } 9810 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 9811 // cond: ptr.Op != OpSB 9812 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 9813 for { 9814 off := v.AuxInt 9815 sym := v.Aux 9816 _ = v.Args[1] 9817 v_0 := v.Args[0] 9818 if v_0.Op != OpAMD64ADDQ { 9819 break 9820 } 9821 _ = v_0.Args[1] 9822 ptr := v_0.Args[0] 9823 idx := v_0.Args[1] 9824 mem := v.Args[1] 9825 if !(ptr.Op != OpSB) { 9826 break 9827 } 9828 v.reset(OpAMD64MOVQloadidx1) 9829 v.AuxInt = off 9830 v.Aux = sym 9831 v.AddArg(ptr) 9832 v.AddArg(idx) 9833 v.AddArg(mem) 9834 return true 9835 } 9836 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 9837 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 9838 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9839 for { 9840 off1 := v.AuxInt 9841 sym1 := v.Aux 9842 _ = v.Args[1] 9843 v_0 := v.Args[0] 9844 if v_0.Op != OpAMD64LEAL { 9845 break 9846 } 9847 off2 := v_0.AuxInt 9848 sym2 := v_0.Aux 9849 base := v_0.Args[0] 9850 mem := v.Args[1] 9851 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 9852 break 9853 } 9854 v.reset(OpAMD64MOVQload) 9855 v.AuxInt = off1 + off2 9856 v.Aux = mergeSym(sym1, sym2) 9857 v.AddArg(base) 9858 v.AddArg(mem) 9859 return true 9860 } 9861 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 9862 // cond: is32Bit(off1+off2) 9863 // result: (MOVQload [off1+off2] {sym} ptr mem) 9864 for { 9865 off1 := v.AuxInt 9866 sym := v.Aux 9867 _ = v.Args[1] 9868 v_0 := v.Args[0] 9869 if v_0.Op != OpAMD64ADDLconst { 9870 break 9871 } 9872 off2 := v_0.AuxInt 9873 ptr := v_0.Args[0] 9874 mem := v.Args[1] 9875 if !(is32Bit(off1 + off2)) { 9876 break 9877 } 9878 v.reset(OpAMD64MOVQload) 9879 v.AuxInt = off1 + off2 9880 v.Aux = sym 9881 v.AddArg(ptr) 9882 v.AddArg(mem) 9883 return true 9884 } 9885 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) 9886 // cond: 9887 // result: (MOVQf2i val) 9888 for { 9889 off := v.AuxInt 9890 sym := v.Aux 9891 _ = v.Args[1] 9892 ptr := v.Args[0] 9893 v_1 := v.Args[1] 9894 if v_1.Op != OpAMD64MOVSDstore { 9895 break 9896 } 9897 if v_1.AuxInt != off { 9898 break 9899 } 9900 if v_1.Aux != sym { 9901 break 9902 } 9903 _ = v_1.Args[2] 9904 if ptr != v_1.Args[0] { 9905 break 9906 } 9907 val := v_1.Args[1] 9908 v.reset(OpAMD64MOVQf2i) 9909 v.AddArg(val) 9910 return true 9911 } 9912 return false 9913 } 9914 
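// The offset-folding rules above all funnel through a pair of tiny
// predicates. A minimal sketch of their assumed behavior follows (modeled
// on rewrite.go in this package); the Sketch suffix marks these as
// illustrations, not the package's real helpers.
func is32BitSketch(n int64) bool {
	// A combined displacement may be folded into an AuxInt only if it
	// still fits in a signed 32-bit immediate.
	return n == int64(int32(n))
}

func canMergeSymSketch(x, y interface{}) bool {
	// Two symbol annotations can merge only when at most one is present.
	return x == nil || y == nil
}

func mergeSymSketch(x, y interface{}) interface{} {
	// Keep whichever symbol is set; canMergeSymSketch rules out the case
	// where both are.
	if x == nil {
		return y
	}
	return x
}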
func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { 9915 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 9916 // cond: 9917 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 9918 for { 9919 c := v.AuxInt 9920 sym := v.Aux 9921 _ = v.Args[2] 9922 ptr := v.Args[0] 9923 v_1 := v.Args[1] 9924 if v_1.Op != OpAMD64SHLQconst { 9925 break 9926 } 9927 if v_1.AuxInt != 3 { 9928 break 9929 } 9930 idx := v_1.Args[0] 9931 mem := v.Args[2] 9932 v.reset(OpAMD64MOVQloadidx8) 9933 v.AuxInt = c 9934 v.Aux = sym 9935 v.AddArg(ptr) 9936 v.AddArg(idx) 9937 v.AddArg(mem) 9938 return true 9939 } 9940 // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 9941 // cond: 9942 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 9943 for { 9944 c := v.AuxInt 9945 sym := v.Aux 9946 _ = v.Args[2] 9947 v_0 := v.Args[0] 9948 if v_0.Op != OpAMD64SHLQconst { 9949 break 9950 } 9951 if v_0.AuxInt != 3 { 9952 break 9953 } 9954 idx := v_0.Args[0] 9955 ptr := v.Args[1] 9956 mem := v.Args[2] 9957 v.reset(OpAMD64MOVQloadidx8) 9958 v.AuxInt = c 9959 v.Aux = sym 9960 v.AddArg(ptr) 9961 v.AddArg(idx) 9962 v.AddArg(mem) 9963 return true 9964 } 9965 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9966 // cond: is32Bit(c+d) 9967 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9968 for { 9969 c := v.AuxInt 9970 sym := v.Aux 9971 _ = v.Args[2] 9972 v_0 := v.Args[0] 9973 if v_0.Op != OpAMD64ADDQconst { 9974 break 9975 } 9976 d := v_0.AuxInt 9977 ptr := v_0.Args[0] 9978 idx := v.Args[1] 9979 mem := v.Args[2] 9980 if !(is32Bit(c + d)) { 9981 break 9982 } 9983 v.reset(OpAMD64MOVQloadidx1) 9984 v.AuxInt = c + d 9985 v.Aux = sym 9986 v.AddArg(ptr) 9987 v.AddArg(idx) 9988 v.AddArg(mem) 9989 return true 9990 } 9991 // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 9992 // cond: is32Bit(c+d) 9993 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9994 for { 9995 c := v.AuxInt 9996 sym := v.Aux 9997 _ = v.Args[2] 9998 idx := v.Args[0] 9999 v_1 := v.Args[1] 10000 if v_1.Op != OpAMD64ADDQconst { 10001 break 10002 } 10003 d := v_1.AuxInt 10004 ptr := v_1.Args[0] 10005 mem := v.Args[2] 10006 if !(is32Bit(c + d)) { 10007 break 10008 } 10009 v.reset(OpAMD64MOVQloadidx1) 10010 v.AuxInt = c + d 10011 v.Aux = sym 10012 v.AddArg(ptr) 10013 v.AddArg(idx) 10014 v.AddArg(mem) 10015 return true 10016 } 10017 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 10018 // cond: is32Bit(c+d) 10019 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 10020 for { 10021 c := v.AuxInt 10022 sym := v.Aux 10023 _ = v.Args[2] 10024 ptr := v.Args[0] 10025 v_1 := v.Args[1] 10026 if v_1.Op != OpAMD64ADDQconst { 10027 break 10028 } 10029 d := v_1.AuxInt 10030 idx := v_1.Args[0] 10031 mem := v.Args[2] 10032 if !(is32Bit(c + d)) { 10033 break 10034 } 10035 v.reset(OpAMD64MOVQloadidx1) 10036 v.AuxInt = c + d 10037 v.Aux = sym 10038 v.AddArg(ptr) 10039 v.AddArg(idx) 10040 v.AddArg(mem) 10041 return true 10042 } 10043 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 10044 // cond: is32Bit(c+d) 10045 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 10046 for { 10047 c := v.AuxInt 10048 sym := v.Aux 10049 _ = v.Args[2] 10050 v_0 := v.Args[0] 10051 if v_0.Op != OpAMD64ADDQconst { 10052 break 10053 } 10054 d := v_0.AuxInt 10055 idx := v_0.Args[0] 10056 ptr := v.Args[1] 10057 mem := v.Args[2] 10058 if !(is32Bit(c + d)) { 10059 break 10060 } 10061 v.reset(OpAMD64MOVQloadidx1) 10062 v.AuxInt = c + d 10063 v.Aux = sym 10064 v.AddArg(ptr) 10065 v.AddArg(idx) 10066 v.AddArg(mem) 10067 return true 10068 } 10069 
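// The unscaled idx1 form is commutative in ptr and idx, which is why each
// ADDQconst fold above is spelled out once for each operand order.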
return false 10070 } 10071 func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { 10072 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 10073 // cond: is32Bit(c+d) 10074 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 10075 for { 10076 c := v.AuxInt 10077 sym := v.Aux 10078 _ = v.Args[2] 10079 v_0 := v.Args[0] 10080 if v_0.Op != OpAMD64ADDQconst { 10081 break 10082 } 10083 d := v_0.AuxInt 10084 ptr := v_0.Args[0] 10085 idx := v.Args[1] 10086 mem := v.Args[2] 10087 if !(is32Bit(c + d)) { 10088 break 10089 } 10090 v.reset(OpAMD64MOVQloadidx8) 10091 v.AuxInt = c + d 10092 v.Aux = sym 10093 v.AddArg(ptr) 10094 v.AddArg(idx) 10095 v.AddArg(mem) 10096 return true 10097 } 10098 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 10099 // cond: is32Bit(c+8*d) 10100 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 10101 for { 10102 c := v.AuxInt 10103 sym := v.Aux 10104 _ = v.Args[2] 10105 ptr := v.Args[0] 10106 v_1 := v.Args[1] 10107 if v_1.Op != OpAMD64ADDQconst { 10108 break 10109 } 10110 d := v_1.AuxInt 10111 idx := v_1.Args[0] 10112 mem := v.Args[2] 10113 if !(is32Bit(c + 8*d)) { 10114 break 10115 } 10116 v.reset(OpAMD64MOVQloadidx8) 10117 v.AuxInt = c + 8*d 10118 v.Aux = sym 10119 v.AddArg(ptr) 10120 v.AddArg(idx) 10121 v.AddArg(mem) 10122 return true 10123 } 10124 return false 10125 } 10126 func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { 10127 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 10128 // cond: is32Bit(off1+off2) 10129 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 10130 for { 10131 off1 := v.AuxInt 10132 sym := v.Aux 10133 _ = v.Args[2] 10134 v_0 := v.Args[0] 10135 if v_0.Op != OpAMD64ADDQconst { 10136 break 10137 } 10138 off2 := v_0.AuxInt 10139 ptr := v_0.Args[0] 10140 val := v.Args[1] 10141 mem := v.Args[2] 10142 if !(is32Bit(off1 + off2)) { 10143 break 10144 } 10145 v.reset(OpAMD64MOVQstore) 10146 v.AuxInt = off1 + off2 10147 v.Aux = sym 10148 v.AddArg(ptr) 10149 v.AddArg(val) 10150 v.AddArg(mem) 10151 return true 10152 } 10153 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 10154 // cond: validValAndOff(c,off) 10155 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 10156 for { 10157 off := v.AuxInt 10158 sym := v.Aux 10159 _ = v.Args[2] 10160 ptr := v.Args[0] 10161 v_1 := v.Args[1] 10162 if v_1.Op != OpAMD64MOVQconst { 10163 break 10164 } 10165 c := v_1.AuxInt 10166 mem := v.Args[2] 10167 if !(validValAndOff(c, off)) { 10168 break 10169 } 10170 v.reset(OpAMD64MOVQstoreconst) 10171 v.AuxInt = makeValAndOff(c, off) 10172 v.Aux = sym 10173 v.AddArg(ptr) 10174 v.AddArg(mem) 10175 return true 10176 } 10177 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 10178 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10179 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10180 for { 10181 off1 := v.AuxInt 10182 sym1 := v.Aux 10183 _ = v.Args[2] 10184 v_0 := v.Args[0] 10185 if v_0.Op != OpAMD64LEAQ { 10186 break 10187 } 10188 off2 := v_0.AuxInt 10189 sym2 := v_0.Aux 10190 base := v_0.Args[0] 10191 val := v.Args[1] 10192 mem := v.Args[2] 10193 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10194 break 10195 } 10196 v.reset(OpAMD64MOVQstore) 10197 v.AuxInt = off1 + off2 10198 v.Aux = mergeSym(sym1, sym2) 10199 v.AddArg(base) 10200 v.AddArg(val) 10201 v.AddArg(mem) 10202 return true 10203 } 10204 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 10205 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10206 // 
result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10207 for { 10208 off1 := v.AuxInt 10209 sym1 := v.Aux 10210 _ = v.Args[2] 10211 v_0 := v.Args[0] 10212 if v_0.Op != OpAMD64LEAQ1 { 10213 break 10214 } 10215 off2 := v_0.AuxInt 10216 sym2 := v_0.Aux 10217 _ = v_0.Args[1] 10218 ptr := v_0.Args[0] 10219 idx := v_0.Args[1] 10220 val := v.Args[1] 10221 mem := v.Args[2] 10222 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10223 break 10224 } 10225 v.reset(OpAMD64MOVQstoreidx1) 10226 v.AuxInt = off1 + off2 10227 v.Aux = mergeSym(sym1, sym2) 10228 v.AddArg(ptr) 10229 v.AddArg(idx) 10230 v.AddArg(val) 10231 v.AddArg(mem) 10232 return true 10233 } 10234 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 10235 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10236 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10237 for { 10238 off1 := v.AuxInt 10239 sym1 := v.Aux 10240 _ = v.Args[2] 10241 v_0 := v.Args[0] 10242 if v_0.Op != OpAMD64LEAQ8 { 10243 break 10244 } 10245 off2 := v_0.AuxInt 10246 sym2 := v_0.Aux 10247 _ = v_0.Args[1] 10248 ptr := v_0.Args[0] 10249 idx := v_0.Args[1] 10250 val := v.Args[1] 10251 mem := v.Args[2] 10252 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10253 break 10254 } 10255 v.reset(OpAMD64MOVQstoreidx8) 10256 v.AuxInt = off1 + off2 10257 v.Aux = mergeSym(sym1, sym2) 10258 v.AddArg(ptr) 10259 v.AddArg(idx) 10260 v.AddArg(val) 10261 v.AddArg(mem) 10262 return true 10263 } 10264 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 10265 // cond: ptr.Op != OpSB 10266 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 10267 for { 10268 off := v.AuxInt 10269 sym := v.Aux 10270 _ = v.Args[2] 10271 v_0 := v.Args[0] 10272 if v_0.Op != OpAMD64ADDQ { 10273 break 10274 } 10275 _ = v_0.Args[1] 10276 ptr := v_0.Args[0] 10277 idx := v_0.Args[1] 10278 val := v.Args[1] 10279 mem := v.Args[2] 10280 if !(ptr.Op != OpSB) { 10281 break 10282 } 10283 v.reset(OpAMD64MOVQstoreidx1) 10284 v.AuxInt = off 10285 v.Aux = sym 10286 v.AddArg(ptr) 10287 v.AddArg(idx) 10288 v.AddArg(val) 10289 v.AddArg(mem) 10290 return true 10291 } 10292 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 10293 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 10294 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10295 for { 10296 off1 := v.AuxInt 10297 sym1 := v.Aux 10298 _ = v.Args[2] 10299 v_0 := v.Args[0] 10300 if v_0.Op != OpAMD64LEAL { 10301 break 10302 } 10303 off2 := v_0.AuxInt 10304 sym2 := v_0.Aux 10305 base := v_0.Args[0] 10306 val := v.Args[1] 10307 mem := v.Args[2] 10308 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 10309 break 10310 } 10311 v.reset(OpAMD64MOVQstore) 10312 v.AuxInt = off1 + off2 10313 v.Aux = mergeSym(sym1, sym2) 10314 v.AddArg(base) 10315 v.AddArg(val) 10316 v.AddArg(mem) 10317 return true 10318 } 10319 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 10320 // cond: is32Bit(off1+off2) 10321 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 10322 for { 10323 off1 := v.AuxInt 10324 sym := v.Aux 10325 _ = v.Args[2] 10326 v_0 := v.Args[0] 10327 if v_0.Op != OpAMD64ADDLconst { 10328 break 10329 } 10330 off2 := v_0.AuxInt 10331 ptr := v_0.Args[0] 10332 val := v.Args[1] 10333 mem := v.Args[2] 10334 if !(is32Bit(off1 + off2)) { 10335 break 10336 } 10337 v.reset(OpAMD64MOVQstore) 10338 v.AuxInt = off1 + off2 10339 v.Aux = sym 10340 v.AddArg(ptr) 10341 v.AddArg(val) 10342 v.AddArg(mem) 10343 return true 10344 } 10345 // match: 
(MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 10346 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) 10347 // result: (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem) 10348 for { 10349 off := v.AuxInt 10350 sym := v.Aux 10351 _ = v.Args[2] 10352 ptr := v.Args[0] 10353 a := v.Args[1] 10354 if a.Op != OpAMD64ADDQconst { 10355 break 10356 } 10357 c := a.AuxInt 10358 l := a.Args[0] 10359 if l.Op != OpAMD64MOVQload { 10360 break 10361 } 10362 if l.AuxInt != off { 10363 break 10364 } 10365 if l.Aux != sym { 10366 break 10367 } 10368 _ = l.Args[1] 10369 ptr2 := l.Args[0] 10370 mem := l.Args[1] 10371 if mem != v.Args[2] { 10372 break 10373 } 10374 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { 10375 break 10376 } 10377 v.reset(OpAMD64ADDQconstmem) 10378 v.AuxInt = makeValAndOff(c, off) 10379 v.Aux = sym 10380 v.AddArg(ptr) 10381 v.AddArg(mem) 10382 return true 10383 } 10384 // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) 10385 // cond: 10386 // result: (MOVSDstore [off] {sym} ptr val mem) 10387 for { 10388 off := v.AuxInt 10389 sym := v.Aux 10390 _ = v.Args[2] 10391 ptr := v.Args[0] 10392 v_1 := v.Args[1] 10393 if v_1.Op != OpAMD64MOVQf2i { 10394 break 10395 } 10396 val := v_1.Args[0] 10397 mem := v.Args[2] 10398 v.reset(OpAMD64MOVSDstore) 10399 v.AuxInt = off 10400 v.Aux = sym 10401 v.AddArg(ptr) 10402 v.AddArg(val) 10403 v.AddArg(mem) 10404 return true 10405 } 10406 return false 10407 } 10408 func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { 10409 b := v.Block 10410 _ = b 10411 config := b.Func.Config 10412 _ = config 10413 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 10414 // cond: ValAndOff(sc).canAdd(off) 10415 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 10416 for { 10417 sc := v.AuxInt 10418 s := v.Aux 10419 _ = v.Args[1] 10420 v_0 := v.Args[0] 10421 if v_0.Op != OpAMD64ADDQconst { 10422 break 10423 } 10424 off := v_0.AuxInt 10425 ptr := v_0.Args[0] 10426 mem := v.Args[1] 10427 if !(ValAndOff(sc).canAdd(off)) { 10428 break 10429 } 10430 v.reset(OpAMD64MOVQstoreconst) 10431 v.AuxInt = ValAndOff(sc).add(off) 10432 v.Aux = s 10433 v.AddArg(ptr) 10434 v.AddArg(mem) 10435 return true 10436 } 10437 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 10438 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 10439 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 10440 for { 10441 sc := v.AuxInt 10442 sym1 := v.Aux 10443 _ = v.Args[1] 10444 v_0 := v.Args[0] 10445 if v_0.Op != OpAMD64LEAQ { 10446 break 10447 } 10448 off := v_0.AuxInt 10449 sym2 := v_0.Aux 10450 ptr := v_0.Args[0] 10451 mem := v.Args[1] 10452 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 10453 break 10454 } 10455 v.reset(OpAMD64MOVQstoreconst) 10456 v.AuxInt = ValAndOff(sc).add(off) 10457 v.Aux = mergeSym(sym1, sym2) 10458 v.AddArg(ptr) 10459 v.AddArg(mem) 10460 return true 10461 } 10462 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 10463 // cond: canMergeSym(sym1, sym2) 10464 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 10465 for { 10466 x := v.AuxInt 10467 sym1 := v.Aux 10468 _ = v.Args[1] 10469 v_0 := v.Args[0] 10470 if v_0.Op != OpAMD64LEAQ1 { 10471 break 10472 } 10473 off := v_0.AuxInt 10474 sym2 := v_0.Aux 10475 _ = v_0.Args[1] 10476 ptr := v_0.Args[0] 10477 idx := v_0.Args[1] 10478 mem := v.Args[1] 10479 if 
!(canMergeSym(sym1, sym2)) { 10480 break 10481 } 10482 v.reset(OpAMD64MOVQstoreconstidx1) 10483 v.AuxInt = ValAndOff(x).add(off) 10484 v.Aux = mergeSym(sym1, sym2) 10485 v.AddArg(ptr) 10486 v.AddArg(idx) 10487 v.AddArg(mem) 10488 return true 10489 } 10490 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 10491 // cond: canMergeSym(sym1, sym2) 10492 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 10493 for { 10494 x := v.AuxInt 10495 sym1 := v.Aux 10496 _ = v.Args[1] 10497 v_0 := v.Args[0] 10498 if v_0.Op != OpAMD64LEAQ8 { 10499 break 10500 } 10501 off := v_0.AuxInt 10502 sym2 := v_0.Aux 10503 _ = v_0.Args[1] 10504 ptr := v_0.Args[0] 10505 idx := v_0.Args[1] 10506 mem := v.Args[1] 10507 if !(canMergeSym(sym1, sym2)) { 10508 break 10509 } 10510 v.reset(OpAMD64MOVQstoreconstidx8) 10511 v.AuxInt = ValAndOff(x).add(off) 10512 v.Aux = mergeSym(sym1, sym2) 10513 v.AddArg(ptr) 10514 v.AddArg(idx) 10515 v.AddArg(mem) 10516 return true 10517 } 10518 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 10519 // cond: 10520 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 10521 for { 10522 x := v.AuxInt 10523 sym := v.Aux 10524 _ = v.Args[1] 10525 v_0 := v.Args[0] 10526 if v_0.Op != OpAMD64ADDQ { 10527 break 10528 } 10529 _ = v_0.Args[1] 10530 ptr := v_0.Args[0] 10531 idx := v_0.Args[1] 10532 mem := v.Args[1] 10533 v.reset(OpAMD64MOVQstoreconstidx1) 10534 v.AuxInt = x 10535 v.Aux = sym 10536 v.AddArg(ptr) 10537 v.AddArg(idx) 10538 v.AddArg(mem) 10539 return true 10540 } 10541 // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) 10542 // cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x) 10543 // result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem) 10544 for { 10545 c := v.AuxInt 10546 s := v.Aux 10547 _ = v.Args[1] 10548 p := v.Args[0] 10549 x := v.Args[1] 10550 if x.Op != OpAMD64MOVQstoreconst { 10551 break 10552 } 10553 c2 := x.AuxInt 10554 if x.Aux != s { 10555 break 10556 } 10557 _ = x.Args[1] 10558 if p != x.Args[0] { 10559 break 10560 } 10561 mem := x.Args[1] 10562 if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) { 10563 break 10564 } 10565 v.reset(OpAMD64MOVOstore) 10566 v.AuxInt = ValAndOff(c2).Off() 10567 v.Aux = s 10568 v.AddArg(p) 10569 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 10570 v0.AuxInt = 0 10571 v.AddArg(v0) 10572 v.AddArg(mem) 10573 return true 10574 } 10575 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 10576 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 10577 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 10578 for { 10579 sc := v.AuxInt 10580 sym1 := v.Aux 10581 _ = v.Args[1] 10582 v_0 := v.Args[0] 10583 if v_0.Op != OpAMD64LEAL { 10584 break 10585 } 10586 off := v_0.AuxInt 10587 sym2 := v_0.Aux 10588 ptr := v_0.Args[0] 10589 mem := v.Args[1] 10590 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 10591 break 10592 } 10593 v.reset(OpAMD64MOVQstoreconst) 10594 v.AuxInt = ValAndOff(sc).add(off) 10595 v.Aux = mergeSym(sym1, sym2) 10596 v.AddArg(ptr) 10597 v.AddArg(mem) 10598 return true 10599 } 10600 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 10601 // cond: ValAndOff(sc).canAdd(off) 10602 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 10603 
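// Editorial note, not generated text: unlike the plain load/store rules,
// which keep a single offset in AuxInt and guard with is32Bit, the
// MOVQstoreconst rules pack two quantities into AuxInt as a ValAndOff:
// the constant being stored in the high 32 bits and the address offset
// in the low 32 bits. A sketch of the assumed layout:
//
//	func makeVO(val, off int32) int64 { // hypothetical helper
//		return int64(val)<<32 | int64(uint32(off))
//	}
//
// canAdd(off) then checks that the offset half still fits in 32 bits
// after the addition, and add(off) returns the repacked AuxInt.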
for { 10604 sc := v.AuxInt 10605 s := v.Aux 10606 _ = v.Args[1] 10607 v_0 := v.Args[0] 10608 if v_0.Op != OpAMD64ADDLconst { 10609 break 10610 } 10611 off := v_0.AuxInt 10612 ptr := v_0.Args[0] 10613 mem := v.Args[1] 10614 if !(ValAndOff(sc).canAdd(off)) { 10615 break 10616 } 10617 v.reset(OpAMD64MOVQstoreconst) 10618 v.AuxInt = ValAndOff(sc).add(off) 10619 v.Aux = s 10620 v.AddArg(ptr) 10621 v.AddArg(mem) 10622 return true 10623 } 10624 return false 10625 } 10626 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { 10627 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 10628 // cond: 10629 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 10630 for { 10631 c := v.AuxInt 10632 sym := v.Aux 10633 _ = v.Args[2] 10634 ptr := v.Args[0] 10635 v_1 := v.Args[1] 10636 if v_1.Op != OpAMD64SHLQconst { 10637 break 10638 } 10639 if v_1.AuxInt != 3 { 10640 break 10641 } 10642 idx := v_1.Args[0] 10643 mem := v.Args[2] 10644 v.reset(OpAMD64MOVQstoreconstidx8) 10645 v.AuxInt = c 10646 v.Aux = sym 10647 v.AddArg(ptr) 10648 v.AddArg(idx) 10649 v.AddArg(mem) 10650 return true 10651 } 10652 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 10653 // cond: ValAndOff(x).canAdd(c) 10654 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10655 for { 10656 x := v.AuxInt 10657 sym := v.Aux 10658 _ = v.Args[2] 10659 v_0 := v.Args[0] 10660 if v_0.Op != OpAMD64ADDQconst { 10661 break 10662 } 10663 c := v_0.AuxInt 10664 ptr := v_0.Args[0] 10665 idx := v.Args[1] 10666 mem := v.Args[2] 10667 if !(ValAndOff(x).canAdd(c)) { 10668 break 10669 } 10670 v.reset(OpAMD64MOVQstoreconstidx1) 10671 v.AuxInt = ValAndOff(x).add(c) 10672 v.Aux = sym 10673 v.AddArg(ptr) 10674 v.AddArg(idx) 10675 v.AddArg(mem) 10676 return true 10677 } 10678 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 10679 // cond: ValAndOff(x).canAdd(c) 10680 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10681 for { 10682 x := v.AuxInt 10683 sym := v.Aux 10684 _ = v.Args[2] 10685 ptr := v.Args[0] 10686 v_1 := v.Args[1] 10687 if v_1.Op != OpAMD64ADDQconst { 10688 break 10689 } 10690 c := v_1.AuxInt 10691 idx := v_1.Args[0] 10692 mem := v.Args[2] 10693 if !(ValAndOff(x).canAdd(c)) { 10694 break 10695 } 10696 v.reset(OpAMD64MOVQstoreconstidx1) 10697 v.AuxInt = ValAndOff(x).add(c) 10698 v.Aux = sym 10699 v.AddArg(ptr) 10700 v.AddArg(idx) 10701 v.AddArg(mem) 10702 return true 10703 } 10704 return false 10705 } 10706 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { 10707 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 10708 // cond: ValAndOff(x).canAdd(c) 10709 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10710 for { 10711 x := v.AuxInt 10712 sym := v.Aux 10713 _ = v.Args[2] 10714 v_0 := v.Args[0] 10715 if v_0.Op != OpAMD64ADDQconst { 10716 break 10717 } 10718 c := v_0.AuxInt 10719 ptr := v_0.Args[0] 10720 idx := v.Args[1] 10721 mem := v.Args[2] 10722 if !(ValAndOff(x).canAdd(c)) { 10723 break 10724 } 10725 v.reset(OpAMD64MOVQstoreconstidx8) 10726 v.AuxInt = ValAndOff(x).add(c) 10727 v.Aux = sym 10728 v.AddArg(ptr) 10729 v.AddArg(idx) 10730 v.AddArg(mem) 10731 return true 10732 } 10733 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 10734 // cond: ValAndOff(x).canAdd(8*c) 10735 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 10736 for { 10737 x := v.AuxInt 10738 sym := v.Aux 10739 _ = v.Args[2] 10740 ptr := v.Args[0] 10741 v_1 := 
v.Args[1] 10742 if v_1.Op != OpAMD64ADDQconst { 10743 break 10744 } 10745 c := v_1.AuxInt 10746 idx := v_1.Args[0] 10747 mem := v.Args[2] 10748 if !(ValAndOff(x).canAdd(8 * c)) { 10749 break 10750 } 10751 v.reset(OpAMD64MOVQstoreconstidx8) 10752 v.AuxInt = ValAndOff(x).add(8 * c) 10753 v.Aux = sym 10754 v.AddArg(ptr) 10755 v.AddArg(idx) 10756 v.AddArg(mem) 10757 return true 10758 } 10759 return false 10760 } 10761 func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { 10762 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 10763 // cond: 10764 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 10765 for { 10766 c := v.AuxInt 10767 sym := v.Aux 10768 _ = v.Args[3] 10769 ptr := v.Args[0] 10770 v_1 := v.Args[1] 10771 if v_1.Op != OpAMD64SHLQconst { 10772 break 10773 } 10774 if v_1.AuxInt != 3 { 10775 break 10776 } 10777 idx := v_1.Args[0] 10778 val := v.Args[2] 10779 mem := v.Args[3] 10780 v.reset(OpAMD64MOVQstoreidx8) 10781 v.AuxInt = c 10782 v.Aux = sym 10783 v.AddArg(ptr) 10784 v.AddArg(idx) 10785 v.AddArg(val) 10786 v.AddArg(mem) 10787 return true 10788 } 10789 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10790 // cond: is32Bit(c+d) 10791 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 10792 for { 10793 c := v.AuxInt 10794 sym := v.Aux 10795 _ = v.Args[3] 10796 v_0 := v.Args[0] 10797 if v_0.Op != OpAMD64ADDQconst { 10798 break 10799 } 10800 d := v_0.AuxInt 10801 ptr := v_0.Args[0] 10802 idx := v.Args[1] 10803 val := v.Args[2] 10804 mem := v.Args[3] 10805 if !(is32Bit(c + d)) { 10806 break 10807 } 10808 v.reset(OpAMD64MOVQstoreidx1) 10809 v.AuxInt = c + d 10810 v.Aux = sym 10811 v.AddArg(ptr) 10812 v.AddArg(idx) 10813 v.AddArg(val) 10814 v.AddArg(mem) 10815 return true 10816 } 10817 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10818 // cond: is32Bit(c+d) 10819 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 10820 for { 10821 c := v.AuxInt 10822 sym := v.Aux 10823 _ = v.Args[3] 10824 ptr := v.Args[0] 10825 v_1 := v.Args[1] 10826 if v_1.Op != OpAMD64ADDQconst { 10827 break 10828 } 10829 d := v_1.AuxInt 10830 idx := v_1.Args[0] 10831 val := v.Args[2] 10832 mem := v.Args[3] 10833 if !(is32Bit(c + d)) { 10834 break 10835 } 10836 v.reset(OpAMD64MOVQstoreidx1) 10837 v.AuxInt = c + d 10838 v.Aux = sym 10839 v.AddArg(ptr) 10840 v.AddArg(idx) 10841 v.AddArg(val) 10842 v.AddArg(mem) 10843 return true 10844 } 10845 return false 10846 } 10847 func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { 10848 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10849 // cond: is32Bit(c+d) 10850 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 10851 for { 10852 c := v.AuxInt 10853 sym := v.Aux 10854 _ = v.Args[3] 10855 v_0 := v.Args[0] 10856 if v_0.Op != OpAMD64ADDQconst { 10857 break 10858 } 10859 d := v_0.AuxInt 10860 ptr := v_0.Args[0] 10861 idx := v.Args[1] 10862 val := v.Args[2] 10863 mem := v.Args[3] 10864 if !(is32Bit(c + d)) { 10865 break 10866 } 10867 v.reset(OpAMD64MOVQstoreidx8) 10868 v.AuxInt = c + d 10869 v.Aux = sym 10870 v.AddArg(ptr) 10871 v.AddArg(idx) 10872 v.AddArg(val) 10873 v.AddArg(mem) 10874 return true 10875 } 10876 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10877 // cond: is32Bit(c+8*d) 10878 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 10879 for { 10880 c := v.AuxInt 10881 sym := v.Aux 10882 _ = v.Args[3] 10883 ptr := v.Args[0] 10884 v_1 := v.Args[1] 10885 if v_1.Op != OpAMD64ADDQconst { 10886 break 10887 } 10888 d := v_1.AuxInt 
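// Editorial note, not generated text: this rule is inside the scaled
// MOVQstoreidx8 form, where the index is multiplied by 8 in the
// effective address. Hoisting ADDQconst [d] out of the index therefore
// shifts the address by 8*d, not d, which is why the result uses c+8*d
// and the guard below checks is32Bit(c+8*d) rather than is32Bit(c+d).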
10889 idx := v_1.Args[0] 10890 val := v.Args[2] 10891 mem := v.Args[3] 10892 if !(is32Bit(c + 8*d)) { 10893 break 10894 } 10895 v.reset(OpAMD64MOVQstoreidx8) 10896 v.AuxInt = c + 8*d 10897 v.Aux = sym 10898 v.AddArg(ptr) 10899 v.AddArg(idx) 10900 v.AddArg(val) 10901 v.AddArg(mem) 10902 return true 10903 } 10904 return false 10905 } 10906 func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { 10907 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 10908 // cond: is32Bit(off1+off2) 10909 // result: (MOVSDload [off1+off2] {sym} ptr mem) 10910 for { 10911 off1 := v.AuxInt 10912 sym := v.Aux 10913 _ = v.Args[1] 10914 v_0 := v.Args[0] 10915 if v_0.Op != OpAMD64ADDQconst { 10916 break 10917 } 10918 off2 := v_0.AuxInt 10919 ptr := v_0.Args[0] 10920 mem := v.Args[1] 10921 if !(is32Bit(off1 + off2)) { 10922 break 10923 } 10924 v.reset(OpAMD64MOVSDload) 10925 v.AuxInt = off1 + off2 10926 v.Aux = sym 10927 v.AddArg(ptr) 10928 v.AddArg(mem) 10929 return true 10930 } 10931 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10932 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10933 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10934 for { 10935 off1 := v.AuxInt 10936 sym1 := v.Aux 10937 _ = v.Args[1] 10938 v_0 := v.Args[0] 10939 if v_0.Op != OpAMD64LEAQ { 10940 break 10941 } 10942 off2 := v_0.AuxInt 10943 sym2 := v_0.Aux 10944 base := v_0.Args[0] 10945 mem := v.Args[1] 10946 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10947 break 10948 } 10949 v.reset(OpAMD64MOVSDload) 10950 v.AuxInt = off1 + off2 10951 v.Aux = mergeSym(sym1, sym2) 10952 v.AddArg(base) 10953 v.AddArg(mem) 10954 return true 10955 } 10956 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 10957 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10958 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10959 for { 10960 off1 := v.AuxInt 10961 sym1 := v.Aux 10962 _ = v.Args[1] 10963 v_0 := v.Args[0] 10964 if v_0.Op != OpAMD64LEAQ1 { 10965 break 10966 } 10967 off2 := v_0.AuxInt 10968 sym2 := v_0.Aux 10969 _ = v_0.Args[1] 10970 ptr := v_0.Args[0] 10971 idx := v_0.Args[1] 10972 mem := v.Args[1] 10973 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10974 break 10975 } 10976 v.reset(OpAMD64MOVSDloadidx1) 10977 v.AuxInt = off1 + off2 10978 v.Aux = mergeSym(sym1, sym2) 10979 v.AddArg(ptr) 10980 v.AddArg(idx) 10981 v.AddArg(mem) 10982 return true 10983 } 10984 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 10985 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10986 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10987 for { 10988 off1 := v.AuxInt 10989 sym1 := v.Aux 10990 _ = v.Args[1] 10991 v_0 := v.Args[0] 10992 if v_0.Op != OpAMD64LEAQ8 { 10993 break 10994 } 10995 off2 := v_0.AuxInt 10996 sym2 := v_0.Aux 10997 _ = v_0.Args[1] 10998 ptr := v_0.Args[0] 10999 idx := v_0.Args[1] 11000 mem := v.Args[1] 11001 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11002 break 11003 } 11004 v.reset(OpAMD64MOVSDloadidx8) 11005 v.AuxInt = off1 + off2 11006 v.Aux = mergeSym(sym1, sym2) 11007 v.AddArg(ptr) 11008 v.AddArg(idx) 11009 v.AddArg(mem) 11010 return true 11011 } 11012 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 11013 // cond: ptr.Op != OpSB 11014 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 11015 for { 11016 off := v.AuxInt 11017 sym := v.Aux 11018 _ = v.Args[1] 11019 v_0 := v.Args[0] 11020 if v_0.Op != OpAMD64ADDQ { 11021 break 11022 } 11023 _ = v_0.Args[1] 11024 
ptr := v_0.Args[0] 11025 idx := v_0.Args[1] 11026 mem := v.Args[1] 11027 if !(ptr.Op != OpSB) { 11028 break 11029 } 11030 v.reset(OpAMD64MOVSDloadidx1) 11031 v.AuxInt = off 11032 v.Aux = sym 11033 v.AddArg(ptr) 11034 v.AddArg(idx) 11035 v.AddArg(mem) 11036 return true 11037 } 11038 // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) 11039 // cond: 11040 // result: (MOVQi2f val) 11041 for { 11042 off := v.AuxInt 11043 sym := v.Aux 11044 _ = v.Args[1] 11045 ptr := v.Args[0] 11046 v_1 := v.Args[1] 11047 if v_1.Op != OpAMD64MOVQstore { 11048 break 11049 } 11050 if v_1.AuxInt != off { 11051 break 11052 } 11053 if v_1.Aux != sym { 11054 break 11055 } 11056 _ = v_1.Args[2] 11057 if ptr != v_1.Args[0] { 11058 break 11059 } 11060 val := v_1.Args[1] 11061 v.reset(OpAMD64MOVQi2f) 11062 v.AddArg(val) 11063 return true 11064 } 11065 return false 11066 } 11067 func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { 11068 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 11069 // cond: 11070 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 11071 for { 11072 c := v.AuxInt 11073 sym := v.Aux 11074 _ = v.Args[2] 11075 ptr := v.Args[0] 11076 v_1 := v.Args[1] 11077 if v_1.Op != OpAMD64SHLQconst { 11078 break 11079 } 11080 if v_1.AuxInt != 3 { 11081 break 11082 } 11083 idx := v_1.Args[0] 11084 mem := v.Args[2] 11085 v.reset(OpAMD64MOVSDloadidx8) 11086 v.AuxInt = c 11087 v.Aux = sym 11088 v.AddArg(ptr) 11089 v.AddArg(idx) 11090 v.AddArg(mem) 11091 return true 11092 } 11093 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 11094 // cond: is32Bit(c+d) 11095 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 11096 for { 11097 c := v.AuxInt 11098 sym := v.Aux 11099 _ = v.Args[2] 11100 v_0 := v.Args[0] 11101 if v_0.Op != OpAMD64ADDQconst { 11102 break 11103 } 11104 d := v_0.AuxInt 11105 ptr := v_0.Args[0] 11106 idx := v.Args[1] 11107 mem := v.Args[2] 11108 if !(is32Bit(c + d)) { 11109 break 11110 } 11111 v.reset(OpAMD64MOVSDloadidx1) 11112 v.AuxInt = c + d 11113 v.Aux = sym 11114 v.AddArg(ptr) 11115 v.AddArg(idx) 11116 v.AddArg(mem) 11117 return true 11118 } 11119 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 11120 // cond: is32Bit(c+d) 11121 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 11122 for { 11123 c := v.AuxInt 11124 sym := v.Aux 11125 _ = v.Args[2] 11126 ptr := v.Args[0] 11127 v_1 := v.Args[1] 11128 if v_1.Op != OpAMD64ADDQconst { 11129 break 11130 } 11131 d := v_1.AuxInt 11132 idx := v_1.Args[0] 11133 mem := v.Args[2] 11134 if !(is32Bit(c + d)) { 11135 break 11136 } 11137 v.reset(OpAMD64MOVSDloadidx1) 11138 v.AuxInt = c + d 11139 v.Aux = sym 11140 v.AddArg(ptr) 11141 v.AddArg(idx) 11142 v.AddArg(mem) 11143 return true 11144 } 11145 return false 11146 } 11147 func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { 11148 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 11149 // cond: is32Bit(c+d) 11150 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 11151 for { 11152 c := v.AuxInt 11153 sym := v.Aux 11154 _ = v.Args[2] 11155 v_0 := v.Args[0] 11156 if v_0.Op != OpAMD64ADDQconst { 11157 break 11158 } 11159 d := v_0.AuxInt 11160 ptr := v_0.Args[0] 11161 idx := v.Args[1] 11162 mem := v.Args[2] 11163 if !(is32Bit(c + d)) { 11164 break 11165 } 11166 v.reset(OpAMD64MOVSDloadidx8) 11167 v.AuxInt = c + d 11168 v.Aux = sym 11169 v.AddArg(ptr) 11170 v.AddArg(idx) 11171 v.AddArg(mem) 11172 return true 11173 } 11174 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 11175 // cond: is32Bit(c+8*d) 11176 // 
result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 11177 for { 11178 c := v.AuxInt 11179 sym := v.Aux 11180 _ = v.Args[2] 11181 ptr := v.Args[0] 11182 v_1 := v.Args[1] 11183 if v_1.Op != OpAMD64ADDQconst { 11184 break 11185 } 11186 d := v_1.AuxInt 11187 idx := v_1.Args[0] 11188 mem := v.Args[2] 11189 if !(is32Bit(c + 8*d)) { 11190 break 11191 } 11192 v.reset(OpAMD64MOVSDloadidx8) 11193 v.AuxInt = c + 8*d 11194 v.Aux = sym 11195 v.AddArg(ptr) 11196 v.AddArg(idx) 11197 v.AddArg(mem) 11198 return true 11199 } 11200 return false 11201 } 11202 func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { 11203 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 11204 // cond: is32Bit(off1+off2) 11205 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 11206 for { 11207 off1 := v.AuxInt 11208 sym := v.Aux 11209 _ = v.Args[2] 11210 v_0 := v.Args[0] 11211 if v_0.Op != OpAMD64ADDQconst { 11212 break 11213 } 11214 off2 := v_0.AuxInt 11215 ptr := v_0.Args[0] 11216 val := v.Args[1] 11217 mem := v.Args[2] 11218 if !(is32Bit(off1 + off2)) { 11219 break 11220 } 11221 v.reset(OpAMD64MOVSDstore) 11222 v.AuxInt = off1 + off2 11223 v.Aux = sym 11224 v.AddArg(ptr) 11225 v.AddArg(val) 11226 v.AddArg(mem) 11227 return true 11228 } 11229 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11230 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11231 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11232 for { 11233 off1 := v.AuxInt 11234 sym1 := v.Aux 11235 _ = v.Args[2] 11236 v_0 := v.Args[0] 11237 if v_0.Op != OpAMD64LEAQ { 11238 break 11239 } 11240 off2 := v_0.AuxInt 11241 sym2 := v_0.Aux 11242 base := v_0.Args[0] 11243 val := v.Args[1] 11244 mem := v.Args[2] 11245 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11246 break 11247 } 11248 v.reset(OpAMD64MOVSDstore) 11249 v.AuxInt = off1 + off2 11250 v.Aux = mergeSym(sym1, sym2) 11251 v.AddArg(base) 11252 v.AddArg(val) 11253 v.AddArg(mem) 11254 return true 11255 } 11256 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 11257 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11258 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11259 for { 11260 off1 := v.AuxInt 11261 sym1 := v.Aux 11262 _ = v.Args[2] 11263 v_0 := v.Args[0] 11264 if v_0.Op != OpAMD64LEAQ1 { 11265 break 11266 } 11267 off2 := v_0.AuxInt 11268 sym2 := v_0.Aux 11269 _ = v_0.Args[1] 11270 ptr := v_0.Args[0] 11271 idx := v_0.Args[1] 11272 val := v.Args[1] 11273 mem := v.Args[2] 11274 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11275 break 11276 } 11277 v.reset(OpAMD64MOVSDstoreidx1) 11278 v.AuxInt = off1 + off2 11279 v.Aux = mergeSym(sym1, sym2) 11280 v.AddArg(ptr) 11281 v.AddArg(idx) 11282 v.AddArg(val) 11283 v.AddArg(mem) 11284 return true 11285 } 11286 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 11287 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11288 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11289 for { 11290 off1 := v.AuxInt 11291 sym1 := v.Aux 11292 _ = v.Args[2] 11293 v_0 := v.Args[0] 11294 if v_0.Op != OpAMD64LEAQ8 { 11295 break 11296 } 11297 off2 := v_0.AuxInt 11298 sym2 := v_0.Aux 11299 _ = v_0.Args[1] 11300 ptr := v_0.Args[0] 11301 idx := v_0.Args[1] 11302 val := v.Args[1] 11303 mem := v.Args[2] 11304 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11305 break 11306 } 11307 v.reset(OpAMD64MOVSDstoreidx8) 11308 v.AuxInt = off1 + off2 11309 v.Aux = mergeSym(sym1, sym2) 11310 
v.AddArg(ptr) 11311 v.AddArg(idx) 11312 v.AddArg(val) 11313 v.AddArg(mem) 11314 return true 11315 } 11316 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 11317 // cond: ptr.Op != OpSB 11318 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 11319 for { 11320 off := v.AuxInt 11321 sym := v.Aux 11322 _ = v.Args[2] 11323 v_0 := v.Args[0] 11324 if v_0.Op != OpAMD64ADDQ { 11325 break 11326 } 11327 _ = v_0.Args[1] 11328 ptr := v_0.Args[0] 11329 idx := v_0.Args[1] 11330 val := v.Args[1] 11331 mem := v.Args[2] 11332 if !(ptr.Op != OpSB) { 11333 break 11334 } 11335 v.reset(OpAMD64MOVSDstoreidx1) 11336 v.AuxInt = off 11337 v.Aux = sym 11338 v.AddArg(ptr) 11339 v.AddArg(idx) 11340 v.AddArg(val) 11341 v.AddArg(mem) 11342 return true 11343 } 11344 // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) 11345 // cond: 11346 // result: (MOVQstore [off] {sym} ptr val mem) 11347 for { 11348 off := v.AuxInt 11349 sym := v.Aux 11350 _ = v.Args[2] 11351 ptr := v.Args[0] 11352 v_1 := v.Args[1] 11353 if v_1.Op != OpAMD64MOVQi2f { 11354 break 11355 } 11356 val := v_1.Args[0] 11357 mem := v.Args[2] 11358 v.reset(OpAMD64MOVQstore) 11359 v.AuxInt = off 11360 v.Aux = sym 11361 v.AddArg(ptr) 11362 v.AddArg(val) 11363 v.AddArg(mem) 11364 return true 11365 } 11366 return false 11367 } 11368 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { 11369 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 11370 // cond: 11371 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 11372 for { 11373 c := v.AuxInt 11374 sym := v.Aux 11375 _ = v.Args[3] 11376 ptr := v.Args[0] 11377 v_1 := v.Args[1] 11378 if v_1.Op != OpAMD64SHLQconst { 11379 break 11380 } 11381 if v_1.AuxInt != 3 { 11382 break 11383 } 11384 idx := v_1.Args[0] 11385 val := v.Args[2] 11386 mem := v.Args[3] 11387 v.reset(OpAMD64MOVSDstoreidx8) 11388 v.AuxInt = c 11389 v.Aux = sym 11390 v.AddArg(ptr) 11391 v.AddArg(idx) 11392 v.AddArg(val) 11393 v.AddArg(mem) 11394 return true 11395 } 11396 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 11397 // cond: is32Bit(c+d) 11398 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 11399 for { 11400 c := v.AuxInt 11401 sym := v.Aux 11402 _ = v.Args[3] 11403 v_0 := v.Args[0] 11404 if v_0.Op != OpAMD64ADDQconst { 11405 break 11406 } 11407 d := v_0.AuxInt 11408 ptr := v_0.Args[0] 11409 idx := v.Args[1] 11410 val := v.Args[2] 11411 mem := v.Args[3] 11412 if !(is32Bit(c + d)) { 11413 break 11414 } 11415 v.reset(OpAMD64MOVSDstoreidx1) 11416 v.AuxInt = c + d 11417 v.Aux = sym 11418 v.AddArg(ptr) 11419 v.AddArg(idx) 11420 v.AddArg(val) 11421 v.AddArg(mem) 11422 return true 11423 } 11424 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 11425 // cond: is32Bit(c+d) 11426 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 11427 for { 11428 c := v.AuxInt 11429 sym := v.Aux 11430 _ = v.Args[3] 11431 ptr := v.Args[0] 11432 v_1 := v.Args[1] 11433 if v_1.Op != OpAMD64ADDQconst { 11434 break 11435 } 11436 d := v_1.AuxInt 11437 idx := v_1.Args[0] 11438 val := v.Args[2] 11439 mem := v.Args[3] 11440 if !(is32Bit(c + d)) { 11441 break 11442 } 11443 v.reset(OpAMD64MOVSDstoreidx1) 11444 v.AuxInt = c + d 11445 v.Aux = sym 11446 v.AddArg(ptr) 11447 v.AddArg(idx) 11448 v.AddArg(val) 11449 v.AddArg(mem) 11450 return true 11451 } 11452 return false 11453 } 11454 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { 11455 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 11456 // cond: is32Bit(c+d) 11457 // result: (MOVSDstoreidx8 
[c+d] {sym} ptr idx val mem) 11458 for { 11459 c := v.AuxInt 11460 sym := v.Aux 11461 _ = v.Args[3] 11462 v_0 := v.Args[0] 11463 if v_0.Op != OpAMD64ADDQconst { 11464 break 11465 } 11466 d := v_0.AuxInt 11467 ptr := v_0.Args[0] 11468 idx := v.Args[1] 11469 val := v.Args[2] 11470 mem := v.Args[3] 11471 if !(is32Bit(c + d)) { 11472 break 11473 } 11474 v.reset(OpAMD64MOVSDstoreidx8) 11475 v.AuxInt = c + d 11476 v.Aux = sym 11477 v.AddArg(ptr) 11478 v.AddArg(idx) 11479 v.AddArg(val) 11480 v.AddArg(mem) 11481 return true 11482 } 11483 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 11484 // cond: is32Bit(c+8*d) 11485 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 11486 for { 11487 c := v.AuxInt 11488 sym := v.Aux 11489 _ = v.Args[3] 11490 ptr := v.Args[0] 11491 v_1 := v.Args[1] 11492 if v_1.Op != OpAMD64ADDQconst { 11493 break 11494 } 11495 d := v_1.AuxInt 11496 idx := v_1.Args[0] 11497 val := v.Args[2] 11498 mem := v.Args[3] 11499 if !(is32Bit(c + 8*d)) { 11500 break 11501 } 11502 v.reset(OpAMD64MOVSDstoreidx8) 11503 v.AuxInt = c + 8*d 11504 v.Aux = sym 11505 v.AddArg(ptr) 11506 v.AddArg(idx) 11507 v.AddArg(val) 11508 v.AddArg(mem) 11509 return true 11510 } 11511 return false 11512 } 11513 func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { 11514 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 11515 // cond: is32Bit(off1+off2) 11516 // result: (MOVSSload [off1+off2] {sym} ptr mem) 11517 for { 11518 off1 := v.AuxInt 11519 sym := v.Aux 11520 _ = v.Args[1] 11521 v_0 := v.Args[0] 11522 if v_0.Op != OpAMD64ADDQconst { 11523 break 11524 } 11525 off2 := v_0.AuxInt 11526 ptr := v_0.Args[0] 11527 mem := v.Args[1] 11528 if !(is32Bit(off1 + off2)) { 11529 break 11530 } 11531 v.reset(OpAMD64MOVSSload) 11532 v.AuxInt = off1 + off2 11533 v.Aux = sym 11534 v.AddArg(ptr) 11535 v.AddArg(mem) 11536 return true 11537 } 11538 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 11539 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11540 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 11541 for { 11542 off1 := v.AuxInt 11543 sym1 := v.Aux 11544 _ = v.Args[1] 11545 v_0 := v.Args[0] 11546 if v_0.Op != OpAMD64LEAQ { 11547 break 11548 } 11549 off2 := v_0.AuxInt 11550 sym2 := v_0.Aux 11551 base := v_0.Args[0] 11552 mem := v.Args[1] 11553 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11554 break 11555 } 11556 v.reset(OpAMD64MOVSSload) 11557 v.AuxInt = off1 + off2 11558 v.Aux = mergeSym(sym1, sym2) 11559 v.AddArg(base) 11560 v.AddArg(mem) 11561 return true 11562 } 11563 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 11564 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11565 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 11566 for { 11567 off1 := v.AuxInt 11568 sym1 := v.Aux 11569 _ = v.Args[1] 11570 v_0 := v.Args[0] 11571 if v_0.Op != OpAMD64LEAQ1 { 11572 break 11573 } 11574 off2 := v_0.AuxInt 11575 sym2 := v_0.Aux 11576 _ = v_0.Args[1] 11577 ptr := v_0.Args[0] 11578 idx := v_0.Args[1] 11579 mem := v.Args[1] 11580 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11581 break 11582 } 11583 v.reset(OpAMD64MOVSSloadidx1) 11584 v.AuxInt = off1 + off2 11585 v.Aux = mergeSym(sym1, sym2) 11586 v.AddArg(ptr) 11587 v.AddArg(idx) 11588 v.AddArg(mem) 11589 return true 11590 } 11591 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 11592 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11593 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} 
ptr idx mem) 11594 for { 11595 off1 := v.AuxInt 11596 sym1 := v.Aux 11597 _ = v.Args[1] 11598 v_0 := v.Args[0] 11599 if v_0.Op != OpAMD64LEAQ4 { 11600 break 11601 } 11602 off2 := v_0.AuxInt 11603 sym2 := v_0.Aux 11604 _ = v_0.Args[1] 11605 ptr := v_0.Args[0] 11606 idx := v_0.Args[1] 11607 mem := v.Args[1] 11608 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11609 break 11610 } 11611 v.reset(OpAMD64MOVSSloadidx4) 11612 v.AuxInt = off1 + off2 11613 v.Aux = mergeSym(sym1, sym2) 11614 v.AddArg(ptr) 11615 v.AddArg(idx) 11616 v.AddArg(mem) 11617 return true 11618 } 11619 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 11620 // cond: ptr.Op != OpSB 11621 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 11622 for { 11623 off := v.AuxInt 11624 sym := v.Aux 11625 _ = v.Args[1] 11626 v_0 := v.Args[0] 11627 if v_0.Op != OpAMD64ADDQ { 11628 break 11629 } 11630 _ = v_0.Args[1] 11631 ptr := v_0.Args[0] 11632 idx := v_0.Args[1] 11633 mem := v.Args[1] 11634 if !(ptr.Op != OpSB) { 11635 break 11636 } 11637 v.reset(OpAMD64MOVSSloadidx1) 11638 v.AuxInt = off 11639 v.Aux = sym 11640 v.AddArg(ptr) 11641 v.AddArg(idx) 11642 v.AddArg(mem) 11643 return true 11644 } 11645 // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) 11646 // cond: 11647 // result: (MOVLi2f val) 11648 for { 11649 off := v.AuxInt 11650 sym := v.Aux 11651 _ = v.Args[1] 11652 ptr := v.Args[0] 11653 v_1 := v.Args[1] 11654 if v_1.Op != OpAMD64MOVLstore { 11655 break 11656 } 11657 if v_1.AuxInt != off { 11658 break 11659 } 11660 if v_1.Aux != sym { 11661 break 11662 } 11663 _ = v_1.Args[2] 11664 if ptr != v_1.Args[0] { 11665 break 11666 } 11667 val := v_1.Args[1] 11668 v.reset(OpAMD64MOVLi2f) 11669 v.AddArg(val) 11670 return true 11671 } 11672 return false 11673 } 11674 func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { 11675 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 11676 // cond: 11677 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 11678 for { 11679 c := v.AuxInt 11680 sym := v.Aux 11681 _ = v.Args[2] 11682 ptr := v.Args[0] 11683 v_1 := v.Args[1] 11684 if v_1.Op != OpAMD64SHLQconst { 11685 break 11686 } 11687 if v_1.AuxInt != 2 { 11688 break 11689 } 11690 idx := v_1.Args[0] 11691 mem := v.Args[2] 11692 v.reset(OpAMD64MOVSSloadidx4) 11693 v.AuxInt = c 11694 v.Aux = sym 11695 v.AddArg(ptr) 11696 v.AddArg(idx) 11697 v.AddArg(mem) 11698 return true 11699 } 11700 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 11701 // cond: is32Bit(c+d) 11702 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 11703 for { 11704 c := v.AuxInt 11705 sym := v.Aux 11706 _ = v.Args[2] 11707 v_0 := v.Args[0] 11708 if v_0.Op != OpAMD64ADDQconst { 11709 break 11710 } 11711 d := v_0.AuxInt 11712 ptr := v_0.Args[0] 11713 idx := v.Args[1] 11714 mem := v.Args[2] 11715 if !(is32Bit(c + d)) { 11716 break 11717 } 11718 v.reset(OpAMD64MOVSSloadidx1) 11719 v.AuxInt = c + d 11720 v.Aux = sym 11721 v.AddArg(ptr) 11722 v.AddArg(idx) 11723 v.AddArg(mem) 11724 return true 11725 } 11726 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 11727 // cond: is32Bit(c+d) 11728 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 11729 for { 11730 c := v.AuxInt 11731 sym := v.Aux 11732 _ = v.Args[2] 11733 ptr := v.Args[0] 11734 v_1 := v.Args[1] 11735 if v_1.Op != OpAMD64ADDQconst { 11736 break 11737 } 11738 d := v_1.AuxInt 11739 idx := v_1.Args[0] 11740 mem := v.Args[2] 11741 if !(is32Bit(c + d)) { 11742 break 11743 } 11744 v.reset(OpAMD64MOVSSloadidx1) 11745 v.AuxInt = c + d 11746 v.Aux = sym 11747 
v.AddArg(ptr) 11748 v.AddArg(idx) 11749 v.AddArg(mem) 11750 return true 11751 } 11752 return false 11753 } 11754 func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { 11755 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 11756 // cond: is32Bit(c+d) 11757 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 11758 for { 11759 c := v.AuxInt 11760 sym := v.Aux 11761 _ = v.Args[2] 11762 v_0 := v.Args[0] 11763 if v_0.Op != OpAMD64ADDQconst { 11764 break 11765 } 11766 d := v_0.AuxInt 11767 ptr := v_0.Args[0] 11768 idx := v.Args[1] 11769 mem := v.Args[2] 11770 if !(is32Bit(c + d)) { 11771 break 11772 } 11773 v.reset(OpAMD64MOVSSloadidx4) 11774 v.AuxInt = c + d 11775 v.Aux = sym 11776 v.AddArg(ptr) 11777 v.AddArg(idx) 11778 v.AddArg(mem) 11779 return true 11780 } 11781 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 11782 // cond: is32Bit(c+4*d) 11783 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 11784 for { 11785 c := v.AuxInt 11786 sym := v.Aux 11787 _ = v.Args[2] 11788 ptr := v.Args[0] 11789 v_1 := v.Args[1] 11790 if v_1.Op != OpAMD64ADDQconst { 11791 break 11792 } 11793 d := v_1.AuxInt 11794 idx := v_1.Args[0] 11795 mem := v.Args[2] 11796 if !(is32Bit(c + 4*d)) { 11797 break 11798 } 11799 v.reset(OpAMD64MOVSSloadidx4) 11800 v.AuxInt = c + 4*d 11801 v.Aux = sym 11802 v.AddArg(ptr) 11803 v.AddArg(idx) 11804 v.AddArg(mem) 11805 return true 11806 } 11807 return false 11808 } 11809 func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { 11810 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 11811 // cond: is32Bit(off1+off2) 11812 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 11813 for { 11814 off1 := v.AuxInt 11815 sym := v.Aux 11816 _ = v.Args[2] 11817 v_0 := v.Args[0] 11818 if v_0.Op != OpAMD64ADDQconst { 11819 break 11820 } 11821 off2 := v_0.AuxInt 11822 ptr := v_0.Args[0] 11823 val := v.Args[1] 11824 mem := v.Args[2] 11825 if !(is32Bit(off1 + off2)) { 11826 break 11827 } 11828 v.reset(OpAMD64MOVSSstore) 11829 v.AuxInt = off1 + off2 11830 v.Aux = sym 11831 v.AddArg(ptr) 11832 v.AddArg(val) 11833 v.AddArg(mem) 11834 return true 11835 } 11836 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11837 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11838 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11839 for { 11840 off1 := v.AuxInt 11841 sym1 := v.Aux 11842 _ = v.Args[2] 11843 v_0 := v.Args[0] 11844 if v_0.Op != OpAMD64LEAQ { 11845 break 11846 } 11847 off2 := v_0.AuxInt 11848 sym2 := v_0.Aux 11849 base := v_0.Args[0] 11850 val := v.Args[1] 11851 mem := v.Args[2] 11852 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11853 break 11854 } 11855 v.reset(OpAMD64MOVSSstore) 11856 v.AuxInt = off1 + off2 11857 v.Aux = mergeSym(sym1, sym2) 11858 v.AddArg(base) 11859 v.AddArg(val) 11860 v.AddArg(mem) 11861 return true 11862 } 11863 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 11864 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11865 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11866 for { 11867 off1 := v.AuxInt 11868 sym1 := v.Aux 11869 _ = v.Args[2] 11870 v_0 := v.Args[0] 11871 if v_0.Op != OpAMD64LEAQ1 { 11872 break 11873 } 11874 off2 := v_0.AuxInt 11875 sym2 := v_0.Aux 11876 _ = v_0.Args[1] 11877 ptr := v_0.Args[0] 11878 idx := v_0.Args[1] 11879 val := v.Args[1] 11880 mem := v.Args[2] 11881 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11882 break 11883 } 11884 v.reset(OpAMD64MOVSSstoreidx1) 11885 
v.AuxInt = off1 + off2 11886 v.Aux = mergeSym(sym1, sym2) 11887 v.AddArg(ptr) 11888 v.AddArg(idx) 11889 v.AddArg(val) 11890 v.AddArg(mem) 11891 return true 11892 } 11893 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 11894 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11895 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11896 for { 11897 off1 := v.AuxInt 11898 sym1 := v.Aux 11899 _ = v.Args[2] 11900 v_0 := v.Args[0] 11901 if v_0.Op != OpAMD64LEAQ4 { 11902 break 11903 } 11904 off2 := v_0.AuxInt 11905 sym2 := v_0.Aux 11906 _ = v_0.Args[1] 11907 ptr := v_0.Args[0] 11908 idx := v_0.Args[1] 11909 val := v.Args[1] 11910 mem := v.Args[2] 11911 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11912 break 11913 } 11914 v.reset(OpAMD64MOVSSstoreidx4) 11915 v.AuxInt = off1 + off2 11916 v.Aux = mergeSym(sym1, sym2) 11917 v.AddArg(ptr) 11918 v.AddArg(idx) 11919 v.AddArg(val) 11920 v.AddArg(mem) 11921 return true 11922 } 11923 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 11924 // cond: ptr.Op != OpSB 11925 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 11926 for { 11927 off := v.AuxInt 11928 sym := v.Aux 11929 _ = v.Args[2] 11930 v_0 := v.Args[0] 11931 if v_0.Op != OpAMD64ADDQ { 11932 break 11933 } 11934 _ = v_0.Args[1] 11935 ptr := v_0.Args[0] 11936 idx := v_0.Args[1] 11937 val := v.Args[1] 11938 mem := v.Args[2] 11939 if !(ptr.Op != OpSB) { 11940 break 11941 } 11942 v.reset(OpAMD64MOVSSstoreidx1) 11943 v.AuxInt = off 11944 v.Aux = sym 11945 v.AddArg(ptr) 11946 v.AddArg(idx) 11947 v.AddArg(val) 11948 v.AddArg(mem) 11949 return true 11950 } 11951 // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) 11952 // cond: 11953 // result: (MOVLstore [off] {sym} ptr val mem) 11954 for { 11955 off := v.AuxInt 11956 sym := v.Aux 11957 _ = v.Args[2] 11958 ptr := v.Args[0] 11959 v_1 := v.Args[1] 11960 if v_1.Op != OpAMD64MOVLi2f { 11961 break 11962 } 11963 val := v_1.Args[0] 11964 mem := v.Args[2] 11965 v.reset(OpAMD64MOVLstore) 11966 v.AuxInt = off 11967 v.Aux = sym 11968 v.AddArg(ptr) 11969 v.AddArg(val) 11970 v.AddArg(mem) 11971 return true 11972 } 11973 return false 11974 } 11975 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { 11976 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 11977 // cond: 11978 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 11979 for { 11980 c := v.AuxInt 11981 sym := v.Aux 11982 _ = v.Args[3] 11983 ptr := v.Args[0] 11984 v_1 := v.Args[1] 11985 if v_1.Op != OpAMD64SHLQconst { 11986 break 11987 } 11988 if v_1.AuxInt != 2 { 11989 break 11990 } 11991 idx := v_1.Args[0] 11992 val := v.Args[2] 11993 mem := v.Args[3] 11994 v.reset(OpAMD64MOVSSstoreidx4) 11995 v.AuxInt = c 11996 v.Aux = sym 11997 v.AddArg(ptr) 11998 v.AddArg(idx) 11999 v.AddArg(val) 12000 v.AddArg(mem) 12001 return true 12002 } 12003 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 12004 // cond: is32Bit(c+d) 12005 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 12006 for { 12007 c := v.AuxInt 12008 sym := v.Aux 12009 _ = v.Args[3] 12010 v_0 := v.Args[0] 12011 if v_0.Op != OpAMD64ADDQconst { 12012 break 12013 } 12014 d := v_0.AuxInt 12015 ptr := v_0.Args[0] 12016 idx := v.Args[1] 12017 val := v.Args[2] 12018 mem := v.Args[3] 12019 if !(is32Bit(c + d)) { 12020 break 12021 } 12022 v.reset(OpAMD64MOVSSstoreidx1) 12023 v.AuxInt = c + d 12024 v.Aux = sym 12025 v.AddArg(ptr) 12026 v.AddArg(idx) 12027 v.AddArg(val) 12028 v.AddArg(mem) 12029 return true 12030 } 12031 // 
match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 12032 // cond: is32Bit(c+d) 12033 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 12034 for { 12035 c := v.AuxInt 12036 sym := v.Aux 12037 _ = v.Args[3] 12038 ptr := v.Args[0] 12039 v_1 := v.Args[1] 12040 if v_1.Op != OpAMD64ADDQconst { 12041 break 12042 } 12043 d := v_1.AuxInt 12044 idx := v_1.Args[0] 12045 val := v.Args[2] 12046 mem := v.Args[3] 12047 if !(is32Bit(c + d)) { 12048 break 12049 } 12050 v.reset(OpAMD64MOVSSstoreidx1) 12051 v.AuxInt = c + d 12052 v.Aux = sym 12053 v.AddArg(ptr) 12054 v.AddArg(idx) 12055 v.AddArg(val) 12056 v.AddArg(mem) 12057 return true 12058 } 12059 return false 12060 } 12061 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { 12062 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 12063 // cond: is32Bit(c+d) 12064 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 12065 for { 12066 c := v.AuxInt 12067 sym := v.Aux 12068 _ = v.Args[3] 12069 v_0 := v.Args[0] 12070 if v_0.Op != OpAMD64ADDQconst { 12071 break 12072 } 12073 d := v_0.AuxInt 12074 ptr := v_0.Args[0] 12075 idx := v.Args[1] 12076 val := v.Args[2] 12077 mem := v.Args[3] 12078 if !(is32Bit(c + d)) { 12079 break 12080 } 12081 v.reset(OpAMD64MOVSSstoreidx4) 12082 v.AuxInt = c + d 12083 v.Aux = sym 12084 v.AddArg(ptr) 12085 v.AddArg(idx) 12086 v.AddArg(val) 12087 v.AddArg(mem) 12088 return true 12089 } 12090 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 12091 // cond: is32Bit(c+4*d) 12092 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 12093 for { 12094 c := v.AuxInt 12095 sym := v.Aux 12096 _ = v.Args[3] 12097 ptr := v.Args[0] 12098 v_1 := v.Args[1] 12099 if v_1.Op != OpAMD64ADDQconst { 12100 break 12101 } 12102 d := v_1.AuxInt 12103 idx := v_1.Args[0] 12104 val := v.Args[2] 12105 mem := v.Args[3] 12106 if !(is32Bit(c + 4*d)) { 12107 break 12108 } 12109 v.reset(OpAMD64MOVSSstoreidx4) 12110 v.AuxInt = c + 4*d 12111 v.Aux = sym 12112 v.AddArg(ptr) 12113 v.AddArg(idx) 12114 v.AddArg(val) 12115 v.AddArg(mem) 12116 return true 12117 } 12118 return false 12119 } 12120 func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { 12121 b := v.Block 12122 _ = b 12123 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 12124 // cond: x.Uses == 1 && clobber(x) 12125 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 12126 for { 12127 x := v.Args[0] 12128 if x.Op != OpAMD64MOVWload { 12129 break 12130 } 12131 off := x.AuxInt 12132 sym := x.Aux 12133 _ = x.Args[1] 12134 ptr := x.Args[0] 12135 mem := x.Args[1] 12136 if !(x.Uses == 1 && clobber(x)) { 12137 break 12138 } 12139 b = x.Block 12140 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 12141 v.reset(OpCopy) 12142 v.AddArg(v0) 12143 v0.AuxInt = off 12144 v0.Aux = sym 12145 v0.AddArg(ptr) 12146 v0.AddArg(mem) 12147 return true 12148 } 12149 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 12150 // cond: x.Uses == 1 && clobber(x) 12151 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 12152 for { 12153 x := v.Args[0] 12154 if x.Op != OpAMD64MOVLload { 12155 break 12156 } 12157 off := x.AuxInt 12158 sym := x.Aux 12159 _ = x.Args[1] 12160 ptr := x.Args[0] 12161 mem := x.Args[1] 12162 if !(x.Uses == 1 && clobber(x)) { 12163 break 12164 } 12165 b = x.Block 12166 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 12167 v.reset(OpCopy) 12168 v.AddArg(v0) 12169 v0.AuxInt = off 12170 v0.Aux = sym 12171 v0.AddArg(ptr) 12172 v0.AddArg(mem) 12173 return true 12174 } 12175 // match: (MOVWQSX x:(MOVQload 
[off] {sym} ptr mem)) 12176 // cond: x.Uses == 1 && clobber(x) 12177 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 12178 for { 12179 x := v.Args[0] 12180 if x.Op != OpAMD64MOVQload { 12181 break 12182 } 12183 off := x.AuxInt 12184 sym := x.Aux 12185 _ = x.Args[1] 12186 ptr := x.Args[0] 12187 mem := x.Args[1] 12188 if !(x.Uses == 1 && clobber(x)) { 12189 break 12190 } 12191 b = x.Block 12192 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 12193 v.reset(OpCopy) 12194 v.AddArg(v0) 12195 v0.AuxInt = off 12196 v0.Aux = sym 12197 v0.AddArg(ptr) 12198 v0.AddArg(mem) 12199 return true 12200 } 12201 // match: (MOVWQSX (ANDLconst [c] x)) 12202 // cond: c & 0x8000 == 0 12203 // result: (ANDLconst [c & 0x7fff] x) 12204 for { 12205 v_0 := v.Args[0] 12206 if v_0.Op != OpAMD64ANDLconst { 12207 break 12208 } 12209 c := v_0.AuxInt 12210 x := v_0.Args[0] 12211 if !(c&0x8000 == 0) { 12212 break 12213 } 12214 v.reset(OpAMD64ANDLconst) 12215 v.AuxInt = c & 0x7fff 12216 v.AddArg(x) 12217 return true 12218 } 12219 // match: (MOVWQSX (MOVWQSX x)) 12220 // cond: 12221 // result: (MOVWQSX x) 12222 for { 12223 v_0 := v.Args[0] 12224 if v_0.Op != OpAMD64MOVWQSX { 12225 break 12226 } 12227 x := v_0.Args[0] 12228 v.reset(OpAMD64MOVWQSX) 12229 v.AddArg(x) 12230 return true 12231 } 12232 // match: (MOVWQSX (MOVBQSX x)) 12233 // cond: 12234 // result: (MOVBQSX x) 12235 for { 12236 v_0 := v.Args[0] 12237 if v_0.Op != OpAMD64MOVBQSX { 12238 break 12239 } 12240 x := v_0.Args[0] 12241 v.reset(OpAMD64MOVBQSX) 12242 v.AddArg(x) 12243 return true 12244 } 12245 return false 12246 } 12247 func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { 12248 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12249 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12250 // result: (MOVWQSX x) 12251 for { 12252 off := v.AuxInt 12253 sym := v.Aux 12254 _ = v.Args[1] 12255 ptr := v.Args[0] 12256 v_1 := v.Args[1] 12257 if v_1.Op != OpAMD64MOVWstore { 12258 break 12259 } 12260 off2 := v_1.AuxInt 12261 sym2 := v_1.Aux 12262 _ = v_1.Args[2] 12263 ptr2 := v_1.Args[0] 12264 x := v_1.Args[1] 12265 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12266 break 12267 } 12268 v.reset(OpAMD64MOVWQSX) 12269 v.AddArg(x) 12270 return true 12271 } 12272 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12273 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12274 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12275 for { 12276 off1 := v.AuxInt 12277 sym1 := v.Aux 12278 _ = v.Args[1] 12279 v_0 := v.Args[0] 12280 if v_0.Op != OpAMD64LEAQ { 12281 break 12282 } 12283 off2 := v_0.AuxInt 12284 sym2 := v_0.Aux 12285 base := v_0.Args[0] 12286 mem := v.Args[1] 12287 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12288 break 12289 } 12290 v.reset(OpAMD64MOVWQSXload) 12291 v.AuxInt = off1 + off2 12292 v.Aux = mergeSym(sym1, sym2) 12293 v.AddArg(base) 12294 v.AddArg(mem) 12295 return true 12296 } 12297 return false 12298 } 12299 func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { 12300 b := v.Block 12301 _ = b 12302 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 12303 // cond: x.Uses == 1 && clobber(x) 12304 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12305 for { 12306 x := v.Args[0] 12307 if x.Op != OpAMD64MOVWload { 12308 break 12309 } 12310 off := x.AuxInt 12311 sym := x.Aux 12312 _ = x.Args[1] 12313 ptr := x.Args[0] 12314 mem := x.Args[1] 12315 if !(x.Uses == 1 && clobber(x)) { 12316 break 12317 } 12318 b = x.Block 12319 
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 12320 v.reset(OpCopy) 12321 v.AddArg(v0) 12322 v0.AuxInt = off 12323 v0.Aux = sym 12324 v0.AddArg(ptr) 12325 v0.AddArg(mem) 12326 return true 12327 } 12328 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 12329 // cond: x.Uses == 1 && clobber(x) 12330 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12331 for { 12332 x := v.Args[0] 12333 if x.Op != OpAMD64MOVLload { 12334 break 12335 } 12336 off := x.AuxInt 12337 sym := x.Aux 12338 _ = x.Args[1] 12339 ptr := x.Args[0] 12340 mem := x.Args[1] 12341 if !(x.Uses == 1 && clobber(x)) { 12342 break 12343 } 12344 b = x.Block 12345 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 12346 v.reset(OpCopy) 12347 v.AddArg(v0) 12348 v0.AuxInt = off 12349 v0.Aux = sym 12350 v0.AddArg(ptr) 12351 v0.AddArg(mem) 12352 return true 12353 } 12354 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 12355 // cond: x.Uses == 1 && clobber(x) 12356 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12357 for { 12358 x := v.Args[0] 12359 if x.Op != OpAMD64MOVQload { 12360 break 12361 } 12362 off := x.AuxInt 12363 sym := x.Aux 12364 _ = x.Args[1] 12365 ptr := x.Args[0] 12366 mem := x.Args[1] 12367 if !(x.Uses == 1 && clobber(x)) { 12368 break 12369 } 12370 b = x.Block 12371 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 12372 v.reset(OpCopy) 12373 v.AddArg(v0) 12374 v0.AuxInt = off 12375 v0.Aux = sym 12376 v0.AddArg(ptr) 12377 v0.AddArg(mem) 12378 return true 12379 } 12380 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 12381 // cond: x.Uses == 1 && clobber(x) 12382 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 12383 for { 12384 x := v.Args[0] 12385 if x.Op != OpAMD64MOVWloadidx1 { 12386 break 12387 } 12388 off := x.AuxInt 12389 sym := x.Aux 12390 _ = x.Args[2] 12391 ptr := x.Args[0] 12392 idx := x.Args[1] 12393 mem := x.Args[2] 12394 if !(x.Uses == 1 && clobber(x)) { 12395 break 12396 } 12397 b = x.Block 12398 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 12399 v.reset(OpCopy) 12400 v.AddArg(v0) 12401 v0.AuxInt = off 12402 v0.Aux = sym 12403 v0.AddArg(ptr) 12404 v0.AddArg(idx) 12405 v0.AddArg(mem) 12406 return true 12407 } 12408 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 12409 // cond: x.Uses == 1 && clobber(x) 12410 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 12411 for { 12412 x := v.Args[0] 12413 if x.Op != OpAMD64MOVWloadidx2 { 12414 break 12415 } 12416 off := x.AuxInt 12417 sym := x.Aux 12418 _ = x.Args[2] 12419 ptr := x.Args[0] 12420 idx := x.Args[1] 12421 mem := x.Args[2] 12422 if !(x.Uses == 1 && clobber(x)) { 12423 break 12424 } 12425 b = x.Block 12426 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 12427 v.reset(OpCopy) 12428 v.AddArg(v0) 12429 v0.AuxInt = off 12430 v0.Aux = sym 12431 v0.AddArg(ptr) 12432 v0.AddArg(idx) 12433 v0.AddArg(mem) 12434 return true 12435 } 12436 // match: (MOVWQZX (ANDLconst [c] x)) 12437 // cond: 12438 // result: (ANDLconst [c & 0xffff] x) 12439 for { 12440 v_0 := v.Args[0] 12441 if v_0.Op != OpAMD64ANDLconst { 12442 break 12443 } 12444 c := v_0.AuxInt 12445 x := v_0.Args[0] 12446 v.reset(OpAMD64ANDLconst) 12447 v.AuxInt = c & 0xffff 12448 v.AddArg(x) 12449 return true 12450 } 12451 // match: (MOVWQZX (MOVWQZX x)) 12452 // cond: 12453 // result: (MOVWQZX x) 12454 for { 12455 v_0 := v.Args[0] 12456 if v_0.Op != OpAMD64MOVWQZX { 12457 break 12458 } 12459 x := v_0.Args[0] 12460 v.reset(OpAMD64MOVWQZX) 12461 v.AddArg(x) 12462 return true 12463 } 12464 // match: 
(MOVWQZX (MOVBQZX x)) 12465 // cond: 12466 // result: (MOVBQZX x) 12467 for { 12468 v_0 := v.Args[0] 12469 if v_0.Op != OpAMD64MOVBQZX { 12470 break 12471 } 12472 x := v_0.Args[0] 12473 v.reset(OpAMD64MOVBQZX) 12474 v.AddArg(x) 12475 return true 12476 } 12477 return false 12478 } 12479 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 12480 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12481 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12482 // result: (MOVWQZX x) 12483 for { 12484 off := v.AuxInt 12485 sym := v.Aux 12486 _ = v.Args[1] 12487 ptr := v.Args[0] 12488 v_1 := v.Args[1] 12489 if v_1.Op != OpAMD64MOVWstore { 12490 break 12491 } 12492 off2 := v_1.AuxInt 12493 sym2 := v_1.Aux 12494 _ = v_1.Args[2] 12495 ptr2 := v_1.Args[0] 12496 x := v_1.Args[1] 12497 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12498 break 12499 } 12500 v.reset(OpAMD64MOVWQZX) 12501 v.AddArg(x) 12502 return true 12503 } 12504 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 12505 // cond: is32Bit(off1+off2) 12506 // result: (MOVWload [off1+off2] {sym} ptr mem) 12507 for { 12508 off1 := v.AuxInt 12509 sym := v.Aux 12510 _ = v.Args[1] 12511 v_0 := v.Args[0] 12512 if v_0.Op != OpAMD64ADDQconst { 12513 break 12514 } 12515 off2 := v_0.AuxInt 12516 ptr := v_0.Args[0] 12517 mem := v.Args[1] 12518 if !(is32Bit(off1 + off2)) { 12519 break 12520 } 12521 v.reset(OpAMD64MOVWload) 12522 v.AuxInt = off1 + off2 12523 v.Aux = sym 12524 v.AddArg(ptr) 12525 v.AddArg(mem) 12526 return true 12527 } 12528 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12529 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12530 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12531 for { 12532 off1 := v.AuxInt 12533 sym1 := v.Aux 12534 _ = v.Args[1] 12535 v_0 := v.Args[0] 12536 if v_0.Op != OpAMD64LEAQ { 12537 break 12538 } 12539 off2 := v_0.AuxInt 12540 sym2 := v_0.Aux 12541 base := v_0.Args[0] 12542 mem := v.Args[1] 12543 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12544 break 12545 } 12546 v.reset(OpAMD64MOVWload) 12547 v.AuxInt = off1 + off2 12548 v.Aux = mergeSym(sym1, sym2) 12549 v.AddArg(base) 12550 v.AddArg(mem) 12551 return true 12552 } 12553 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 12554 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12555 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12556 for { 12557 off1 := v.AuxInt 12558 sym1 := v.Aux 12559 _ = v.Args[1] 12560 v_0 := v.Args[0] 12561 if v_0.Op != OpAMD64LEAQ1 { 12562 break 12563 } 12564 off2 := v_0.AuxInt 12565 sym2 := v_0.Aux 12566 _ = v_0.Args[1] 12567 ptr := v_0.Args[0] 12568 idx := v_0.Args[1] 12569 mem := v.Args[1] 12570 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12571 break 12572 } 12573 v.reset(OpAMD64MOVWloadidx1) 12574 v.AuxInt = off1 + off2 12575 v.Aux = mergeSym(sym1, sym2) 12576 v.AddArg(ptr) 12577 v.AddArg(idx) 12578 v.AddArg(mem) 12579 return true 12580 } 12581 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 12582 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12583 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12584 for { 12585 off1 := v.AuxInt 12586 sym1 := v.Aux 12587 _ = v.Args[1] 12588 v_0 := v.Args[0] 12589 if v_0.Op != OpAMD64LEAQ2 { 12590 break 12591 } 12592 off2 := v_0.AuxInt 12593 sym2 := v_0.Aux 12594 _ = v_0.Args[1] 12595 ptr := v_0.Args[0] 12596 idx := v_0.Args[1] 12597 mem := v.Args[1] 12598 if 
!(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12599 break 12600 } 12601 v.reset(OpAMD64MOVWloadidx2) 12602 v.AuxInt = off1 + off2 12603 v.Aux = mergeSym(sym1, sym2) 12604 v.AddArg(ptr) 12605 v.AddArg(idx) 12606 v.AddArg(mem) 12607 return true 12608 } 12609 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 12610 // cond: ptr.Op != OpSB 12611 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 12612 for { 12613 off := v.AuxInt 12614 sym := v.Aux 12615 _ = v.Args[1] 12616 v_0 := v.Args[0] 12617 if v_0.Op != OpAMD64ADDQ { 12618 break 12619 } 12620 _ = v_0.Args[1] 12621 ptr := v_0.Args[0] 12622 idx := v_0.Args[1] 12623 mem := v.Args[1] 12624 if !(ptr.Op != OpSB) { 12625 break 12626 } 12627 v.reset(OpAMD64MOVWloadidx1) 12628 v.AuxInt = off 12629 v.Aux = sym 12630 v.AddArg(ptr) 12631 v.AddArg(idx) 12632 v.AddArg(mem) 12633 return true 12634 } 12635 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 12636 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 12637 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12638 for { 12639 off1 := v.AuxInt 12640 sym1 := v.Aux 12641 _ = v.Args[1] 12642 v_0 := v.Args[0] 12643 if v_0.Op != OpAMD64LEAL { 12644 break 12645 } 12646 off2 := v_0.AuxInt 12647 sym2 := v_0.Aux 12648 base := v_0.Args[0] 12649 mem := v.Args[1] 12650 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 12651 break 12652 } 12653 v.reset(OpAMD64MOVWload) 12654 v.AuxInt = off1 + off2 12655 v.Aux = mergeSym(sym1, sym2) 12656 v.AddArg(base) 12657 v.AddArg(mem) 12658 return true 12659 } 12660 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 12661 // cond: is32Bit(off1+off2) 12662 // result: (MOVWload [off1+off2] {sym} ptr mem) 12663 for { 12664 off1 := v.AuxInt 12665 sym := v.Aux 12666 _ = v.Args[1] 12667 v_0 := v.Args[0] 12668 if v_0.Op != OpAMD64ADDLconst { 12669 break 12670 } 12671 off2 := v_0.AuxInt 12672 ptr := v_0.Args[0] 12673 mem := v.Args[1] 12674 if !(is32Bit(off1 + off2)) { 12675 break 12676 } 12677 v.reset(OpAMD64MOVWload) 12678 v.AuxInt = off1 + off2 12679 v.Aux = sym 12680 v.AddArg(ptr) 12681 v.AddArg(mem) 12682 return true 12683 } 12684 return false 12685 } 12686 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 12687 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 12688 // cond: 12689 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 12690 for { 12691 c := v.AuxInt 12692 sym := v.Aux 12693 _ = v.Args[2] 12694 ptr := v.Args[0] 12695 v_1 := v.Args[1] 12696 if v_1.Op != OpAMD64SHLQconst { 12697 break 12698 } 12699 if v_1.AuxInt != 1 { 12700 break 12701 } 12702 idx := v_1.Args[0] 12703 mem := v.Args[2] 12704 v.reset(OpAMD64MOVWloadidx2) 12705 v.AuxInt = c 12706 v.Aux = sym 12707 v.AddArg(ptr) 12708 v.AddArg(idx) 12709 v.AddArg(mem) 12710 return true 12711 } 12712 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) 12713 // cond: 12714 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 12715 for { 12716 c := v.AuxInt 12717 sym := v.Aux 12718 _ = v.Args[2] 12719 v_0 := v.Args[0] 12720 if v_0.Op != OpAMD64SHLQconst { 12721 break 12722 } 12723 if v_0.AuxInt != 1 { 12724 break 12725 } 12726 idx := v_0.Args[0] 12727 ptr := v.Args[1] 12728 mem := v.Args[2] 12729 v.reset(OpAMD64MOVWloadidx2) 12730 v.AuxInt = c 12731 v.Aux = sym 12732 v.AddArg(ptr) 12733 v.AddArg(idx) 12734 v.AddArg(mem) 12735 return true 12736 } 12737 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 12738 // cond: is32Bit(c+d) 12739 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12740 for { 12741 c := v.AuxInt 12742 sym := 
v.Aux 12743 _ = v.Args[2] 12744 v_0 := v.Args[0] 12745 if v_0.Op != OpAMD64ADDQconst { 12746 break 12747 } 12748 d := v_0.AuxInt 12749 ptr := v_0.Args[0] 12750 idx := v.Args[1] 12751 mem := v.Args[2] 12752 if !(is32Bit(c + d)) { 12753 break 12754 } 12755 v.reset(OpAMD64MOVWloadidx1) 12756 v.AuxInt = c + d 12757 v.Aux = sym 12758 v.AddArg(ptr) 12759 v.AddArg(idx) 12760 v.AddArg(mem) 12761 return true 12762 } 12763 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 12764 // cond: is32Bit(c+d) 12765 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12766 for { 12767 c := v.AuxInt 12768 sym := v.Aux 12769 _ = v.Args[2] 12770 idx := v.Args[0] 12771 v_1 := v.Args[1] 12772 if v_1.Op != OpAMD64ADDQconst { 12773 break 12774 } 12775 d := v_1.AuxInt 12776 ptr := v_1.Args[0] 12777 mem := v.Args[2] 12778 if !(is32Bit(c + d)) { 12779 break 12780 } 12781 v.reset(OpAMD64MOVWloadidx1) 12782 v.AuxInt = c + d 12783 v.Aux = sym 12784 v.AddArg(ptr) 12785 v.AddArg(idx) 12786 v.AddArg(mem) 12787 return true 12788 } 12789 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 12790 // cond: is32Bit(c+d) 12791 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12792 for { 12793 c := v.AuxInt 12794 sym := v.Aux 12795 _ = v.Args[2] 12796 ptr := v.Args[0] 12797 v_1 := v.Args[1] 12798 if v_1.Op != OpAMD64ADDQconst { 12799 break 12800 } 12801 d := v_1.AuxInt 12802 idx := v_1.Args[0] 12803 mem := v.Args[2] 12804 if !(is32Bit(c + d)) { 12805 break 12806 } 12807 v.reset(OpAMD64MOVWloadidx1) 12808 v.AuxInt = c + d 12809 v.Aux = sym 12810 v.AddArg(ptr) 12811 v.AddArg(idx) 12812 v.AddArg(mem) 12813 return true 12814 } 12815 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 12816 // cond: is32Bit(c+d) 12817 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12818 for { 12819 c := v.AuxInt 12820 sym := v.Aux 12821 _ = v.Args[2] 12822 v_0 := v.Args[0] 12823 if v_0.Op != OpAMD64ADDQconst { 12824 break 12825 } 12826 d := v_0.AuxInt 12827 idx := v_0.Args[0] 12828 ptr := v.Args[1] 12829 mem := v.Args[2] 12830 if !(is32Bit(c + d)) { 12831 break 12832 } 12833 v.reset(OpAMD64MOVWloadidx1) 12834 v.AuxInt = c + d 12835 v.Aux = sym 12836 v.AddArg(ptr) 12837 v.AddArg(idx) 12838 v.AddArg(mem) 12839 return true 12840 } 12841 return false 12842 } 12843 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 12844 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 12845 // cond: is32Bit(c+d) 12846 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 12847 for { 12848 c := v.AuxInt 12849 sym := v.Aux 12850 _ = v.Args[2] 12851 v_0 := v.Args[0] 12852 if v_0.Op != OpAMD64ADDQconst { 12853 break 12854 } 12855 d := v_0.AuxInt 12856 ptr := v_0.Args[0] 12857 idx := v.Args[1] 12858 mem := v.Args[2] 12859 if !(is32Bit(c + d)) { 12860 break 12861 } 12862 v.reset(OpAMD64MOVWloadidx2) 12863 v.AuxInt = c + d 12864 v.Aux = sym 12865 v.AddArg(ptr) 12866 v.AddArg(idx) 12867 v.AddArg(mem) 12868 return true 12869 } 12870 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 12871 // cond: is32Bit(c+2*d) 12872 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 12873 for { 12874 c := v.AuxInt 12875 sym := v.Aux 12876 _ = v.Args[2] 12877 ptr := v.Args[0] 12878 v_1 := v.Args[1] 12879 if v_1.Op != OpAMD64ADDQconst { 12880 break 12881 } 12882 d := v_1.AuxInt 12883 idx := v_1.Args[0] 12884 mem := v.Args[2] 12885 if !(is32Bit(c + 2*d)) { 12886 break 12887 } 12888 v.reset(OpAMD64MOVWloadidx2) 12889 v.AuxInt = c + 2*d 12890 v.Aux = sym 12891 v.AddArg(ptr) 12892 v.AddArg(idx) 12893 v.AddArg(mem) 12894 return true 
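// Editor's note (hedged): the factor of two in the rule above is the addressing
// scale. For a x2 indexed load, ptr + 2*(idx+d) + c equals ptr + 2*idx + (c+2*d),
// so a constant added to the index folds into the displacement as 2*d, guarded
// by is32Bit(c+2*d); compare the preceding rule, where a constant added to the
// unscaled base pointer folds in as plain c+d.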
12895 } 12896 return false 12897 } 12898 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 12899 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 12900 // cond: 12901 // result: (MOVWstore [off] {sym} ptr x mem) 12902 for { 12903 off := v.AuxInt 12904 sym := v.Aux 12905 _ = v.Args[2] 12906 ptr := v.Args[0] 12907 v_1 := v.Args[1] 12908 if v_1.Op != OpAMD64MOVWQSX { 12909 break 12910 } 12911 x := v_1.Args[0] 12912 mem := v.Args[2] 12913 v.reset(OpAMD64MOVWstore) 12914 v.AuxInt = off 12915 v.Aux = sym 12916 v.AddArg(ptr) 12917 v.AddArg(x) 12918 v.AddArg(mem) 12919 return true 12920 } 12921 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 12922 // cond: 12923 // result: (MOVWstore [off] {sym} ptr x mem) 12924 for { 12925 off := v.AuxInt 12926 sym := v.Aux 12927 _ = v.Args[2] 12928 ptr := v.Args[0] 12929 v_1 := v.Args[1] 12930 if v_1.Op != OpAMD64MOVWQZX { 12931 break 12932 } 12933 x := v_1.Args[0] 12934 mem := v.Args[2] 12935 v.reset(OpAMD64MOVWstore) 12936 v.AuxInt = off 12937 v.Aux = sym 12938 v.AddArg(ptr) 12939 v.AddArg(x) 12940 v.AddArg(mem) 12941 return true 12942 } 12943 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 12944 // cond: is32Bit(off1+off2) 12945 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 12946 for { 12947 off1 := v.AuxInt 12948 sym := v.Aux 12949 _ = v.Args[2] 12950 v_0 := v.Args[0] 12951 if v_0.Op != OpAMD64ADDQconst { 12952 break 12953 } 12954 off2 := v_0.AuxInt 12955 ptr := v_0.Args[0] 12956 val := v.Args[1] 12957 mem := v.Args[2] 12958 if !(is32Bit(off1 + off2)) { 12959 break 12960 } 12961 v.reset(OpAMD64MOVWstore) 12962 v.AuxInt = off1 + off2 12963 v.Aux = sym 12964 v.AddArg(ptr) 12965 v.AddArg(val) 12966 v.AddArg(mem) 12967 return true 12968 } 12969 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 12970 // cond: validOff(off) 12971 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 12972 for { 12973 off := v.AuxInt 12974 sym := v.Aux 12975 _ = v.Args[2] 12976 ptr := v.Args[0] 12977 v_1 := v.Args[1] 12978 if v_1.Op != OpAMD64MOVLconst { 12979 break 12980 } 12981 c := v_1.AuxInt 12982 mem := v.Args[2] 12983 if !(validOff(off)) { 12984 break 12985 } 12986 v.reset(OpAMD64MOVWstoreconst) 12987 v.AuxInt = makeValAndOff(int64(int16(c)), off) 12988 v.Aux = sym 12989 v.AddArg(ptr) 12990 v.AddArg(mem) 12991 return true 12992 } 12993 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 12994 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12995 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 12996 for { 12997 off1 := v.AuxInt 12998 sym1 := v.Aux 12999 _ = v.Args[2] 13000 v_0 := v.Args[0] 13001 if v_0.Op != OpAMD64LEAQ { 13002 break 13003 } 13004 off2 := v_0.AuxInt 13005 sym2 := v_0.Aux 13006 base := v_0.Args[0] 13007 val := v.Args[1] 13008 mem := v.Args[2] 13009 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13010 break 13011 } 13012 v.reset(OpAMD64MOVWstore) 13013 v.AuxInt = off1 + off2 13014 v.Aux = mergeSym(sym1, sym2) 13015 v.AddArg(base) 13016 v.AddArg(val) 13017 v.AddArg(mem) 13018 return true 13019 } 13020 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 13021 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13022 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 13023 for { 13024 off1 := v.AuxInt 13025 sym1 := v.Aux 13026 _ = v.Args[2] 13027 v_0 := v.Args[0] 13028 if v_0.Op != OpAMD64LEAQ1 { 13029 break 13030 } 13031 off2 := v_0.AuxInt 13032 sym2 := v_0.Aux 13033 _ = v_0.Args[1] 
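// Editor's note (hedged): this LEAQ1 rule and the LEAQ2 rule after it fold an
// address computed by a LEA directly into an indexed store, summing the two
// displacements and merging the two symbols. Illustrative example:
// (MOVWstore [4] {s} (LEAQ1 [8] p i) val mem) becomes
// (MOVWstoreidx1 [12] {s} p i val mem), valid because is32Bit(4+8) holds and
// canMergeSym accepts {s} paired with an empty symbol.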
13034 ptr := v_0.Args[0] 13035 idx := v_0.Args[1] 13036 val := v.Args[1] 13037 mem := v.Args[2] 13038 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13039 break 13040 } 13041 v.reset(OpAMD64MOVWstoreidx1) 13042 v.AuxInt = off1 + off2 13043 v.Aux = mergeSym(sym1, sym2) 13044 v.AddArg(ptr) 13045 v.AddArg(idx) 13046 v.AddArg(val) 13047 v.AddArg(mem) 13048 return true 13049 } 13050 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 13051 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13052 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 13053 for { 13054 off1 := v.AuxInt 13055 sym1 := v.Aux 13056 _ = v.Args[2] 13057 v_0 := v.Args[0] 13058 if v_0.Op != OpAMD64LEAQ2 { 13059 break 13060 } 13061 off2 := v_0.AuxInt 13062 sym2 := v_0.Aux 13063 _ = v_0.Args[1] 13064 ptr := v_0.Args[0] 13065 idx := v_0.Args[1] 13066 val := v.Args[1] 13067 mem := v.Args[2] 13068 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13069 break 13070 } 13071 v.reset(OpAMD64MOVWstoreidx2) 13072 v.AuxInt = off1 + off2 13073 v.Aux = mergeSym(sym1, sym2) 13074 v.AddArg(ptr) 13075 v.AddArg(idx) 13076 v.AddArg(val) 13077 v.AddArg(mem) 13078 return true 13079 } 13080 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 13081 // cond: ptr.Op != OpSB 13082 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 13083 for { 13084 off := v.AuxInt 13085 sym := v.Aux 13086 _ = v.Args[2] 13087 v_0 := v.Args[0] 13088 if v_0.Op != OpAMD64ADDQ { 13089 break 13090 } 13091 _ = v_0.Args[1] 13092 ptr := v_0.Args[0] 13093 idx := v_0.Args[1] 13094 val := v.Args[1] 13095 mem := v.Args[2] 13096 if !(ptr.Op != OpSB) { 13097 break 13098 } 13099 v.reset(OpAMD64MOVWstoreidx1) 13100 v.AuxInt = off 13101 v.Aux = sym 13102 v.AddArg(ptr) 13103 v.AddArg(idx) 13104 v.AddArg(val) 13105 v.AddArg(mem) 13106 return true 13107 } 13108 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 13109 // cond: x.Uses == 1 && clobber(x) 13110 // result: (MOVLstore [i-2] {s} p w mem) 13111 for { 13112 i := v.AuxInt 13113 s := v.Aux 13114 _ = v.Args[2] 13115 p := v.Args[0] 13116 v_1 := v.Args[1] 13117 if v_1.Op != OpAMD64SHRQconst { 13118 break 13119 } 13120 if v_1.AuxInt != 16 { 13121 break 13122 } 13123 w := v_1.Args[0] 13124 x := v.Args[2] 13125 if x.Op != OpAMD64MOVWstore { 13126 break 13127 } 13128 if x.AuxInt != i-2 { 13129 break 13130 } 13131 if x.Aux != s { 13132 break 13133 } 13134 _ = x.Args[2] 13135 if p != x.Args[0] { 13136 break 13137 } 13138 if w != x.Args[1] { 13139 break 13140 } 13141 mem := x.Args[2] 13142 if !(x.Uses == 1 && clobber(x)) { 13143 break 13144 } 13145 v.reset(OpAMD64MOVLstore) 13146 v.AuxInt = i - 2 13147 v.Aux = s 13148 v.AddArg(p) 13149 v.AddArg(w) 13150 v.AddArg(mem) 13151 return true 13152 } 13153 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 13154 // cond: x.Uses == 1 && clobber(x) 13155 // result: (MOVLstore [i-2] {s} p w0 mem) 13156 for { 13157 i := v.AuxInt 13158 s := v.Aux 13159 _ = v.Args[2] 13160 p := v.Args[0] 13161 v_1 := v.Args[1] 13162 if v_1.Op != OpAMD64SHRQconst { 13163 break 13164 } 13165 j := v_1.AuxInt 13166 w := v_1.Args[0] 13167 x := v.Args[2] 13168 if x.Op != OpAMD64MOVWstore { 13169 break 13170 } 13171 if x.AuxInt != i-2 { 13172 break 13173 } 13174 if x.Aux != s { 13175 break 13176 } 13177 _ = x.Args[2] 13178 if p != x.Args[0] { 13179 break 13180 } 13181 w0 := x.Args[1] 13182 if w0.Op != OpAMD64SHRQconst { 13183 break 13184 } 13185 if w0.AuxInt != j-16 { 13186 
break 13187 } 13188 if w != w0.Args[0] { 13189 break 13190 } 13191 mem := x.Args[2] 13192 if !(x.Uses == 1 && clobber(x)) { 13193 break 13194 } 13195 v.reset(OpAMD64MOVLstore) 13196 v.AuxInt = i - 2 13197 v.Aux = s 13198 v.AddArg(p) 13199 v.AddArg(w0) 13200 v.AddArg(mem) 13201 return true 13202 } 13203 return false 13204 } 13205 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 13206 b := v.Block 13207 _ = b 13208 typ := &b.Func.Config.Types 13209 _ = typ 13210 // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) 13211 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 13212 // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) 13213 for { 13214 i := v.AuxInt 13215 s := v.Aux 13216 _ = v.Args[2] 13217 p := v.Args[0] 13218 x1 := v.Args[1] 13219 if x1.Op != OpAMD64MOVWload { 13220 break 13221 } 13222 j := x1.AuxInt 13223 s2 := x1.Aux 13224 _ = x1.Args[1] 13225 p2 := x1.Args[0] 13226 mem := x1.Args[1] 13227 mem2 := v.Args[2] 13228 if mem2.Op != OpAMD64MOVWstore { 13229 break 13230 } 13231 if mem2.AuxInt != i-2 { 13232 break 13233 } 13234 if mem2.Aux != s { 13235 break 13236 } 13237 _ = mem2.Args[2] 13238 if p != mem2.Args[0] { 13239 break 13240 } 13241 x2 := mem2.Args[1] 13242 if x2.Op != OpAMD64MOVWload { 13243 break 13244 } 13245 if x2.AuxInt != j-2 { 13246 break 13247 } 13248 if x2.Aux != s2 { 13249 break 13250 } 13251 _ = x2.Args[1] 13252 if p2 != x2.Args[0] { 13253 break 13254 } 13255 if mem != x2.Args[1] { 13256 break 13257 } 13258 if mem != mem2.Args[2] { 13259 break 13260 } 13261 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 13262 break 13263 } 13264 v.reset(OpAMD64MOVLstore) 13265 v.AuxInt = i - 2 13266 v.Aux = s 13267 v.AddArg(p) 13268 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 13269 v0.AuxInt = j - 2 13270 v0.Aux = s2 13271 v0.AddArg(p2) 13272 v0.AddArg(mem) 13273 v.AddArg(v0) 13274 v.AddArg(mem) 13275 return true 13276 } 13277 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 13278 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 13279 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13280 for { 13281 off1 := v.AuxInt 13282 sym1 := v.Aux 13283 _ = v.Args[2] 13284 v_0 := v.Args[0] 13285 if v_0.Op != OpAMD64LEAL { 13286 break 13287 } 13288 off2 := v_0.AuxInt 13289 sym2 := v_0.Aux 13290 base := v_0.Args[0] 13291 val := v.Args[1] 13292 mem := v.Args[2] 13293 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 13294 break 13295 } 13296 v.reset(OpAMD64MOVWstore) 13297 v.AuxInt = off1 + off2 13298 v.Aux = mergeSym(sym1, sym2) 13299 v.AddArg(base) 13300 v.AddArg(val) 13301 v.AddArg(mem) 13302 return true 13303 } 13304 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 13305 // cond: is32Bit(off1+off2) 13306 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 13307 for { 13308 off1 := v.AuxInt 13309 sym := v.Aux 13310 _ = v.Args[2] 13311 v_0 := v.Args[0] 13312 if v_0.Op != OpAMD64ADDLconst { 13313 break 13314 } 13315 off2 := v_0.AuxInt 13316 ptr := v_0.Args[0] 13317 val := v.Args[1] 13318 mem := v.Args[2] 13319 if !(is32Bit(off1 + off2)) { 13320 break 13321 } 13322 v.reset(OpAMD64MOVWstore) 13323 v.AuxInt = off1 + off2 13324 v.Aux = sym 13325 v.AddArg(ptr) 13326 v.AddArg(val) 13327 v.AddArg(mem) 13328 return true 13329 } 13330 return false 13331 } 13332 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) 
bool { 13333 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 13334 // cond: ValAndOff(sc).canAdd(off) 13335 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 13336 for { 13337 sc := v.AuxInt 13338 s := v.Aux 13339 _ = v.Args[1] 13340 v_0 := v.Args[0] 13341 if v_0.Op != OpAMD64ADDQconst { 13342 break 13343 } 13344 off := v_0.AuxInt 13345 ptr := v_0.Args[0] 13346 mem := v.Args[1] 13347 if !(ValAndOff(sc).canAdd(off)) { 13348 break 13349 } 13350 v.reset(OpAMD64MOVWstoreconst) 13351 v.AuxInt = ValAndOff(sc).add(off) 13352 v.Aux = s 13353 v.AddArg(ptr) 13354 v.AddArg(mem) 13355 return true 13356 } 13357 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 13358 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13359 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13360 for { 13361 sc := v.AuxInt 13362 sym1 := v.Aux 13363 _ = v.Args[1] 13364 v_0 := v.Args[0] 13365 if v_0.Op != OpAMD64LEAQ { 13366 break 13367 } 13368 off := v_0.AuxInt 13369 sym2 := v_0.Aux 13370 ptr := v_0.Args[0] 13371 mem := v.Args[1] 13372 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13373 break 13374 } 13375 v.reset(OpAMD64MOVWstoreconst) 13376 v.AuxInt = ValAndOff(sc).add(off) 13377 v.Aux = mergeSym(sym1, sym2) 13378 v.AddArg(ptr) 13379 v.AddArg(mem) 13380 return true 13381 } 13382 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 13383 // cond: canMergeSym(sym1, sym2) 13384 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13385 for { 13386 x := v.AuxInt 13387 sym1 := v.Aux 13388 _ = v.Args[1] 13389 v_0 := v.Args[0] 13390 if v_0.Op != OpAMD64LEAQ1 { 13391 break 13392 } 13393 off := v_0.AuxInt 13394 sym2 := v_0.Aux 13395 _ = v_0.Args[1] 13396 ptr := v_0.Args[0] 13397 idx := v_0.Args[1] 13398 mem := v.Args[1] 13399 if !(canMergeSym(sym1, sym2)) { 13400 break 13401 } 13402 v.reset(OpAMD64MOVWstoreconstidx1) 13403 v.AuxInt = ValAndOff(x).add(off) 13404 v.Aux = mergeSym(sym1, sym2) 13405 v.AddArg(ptr) 13406 v.AddArg(idx) 13407 v.AddArg(mem) 13408 return true 13409 } 13410 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 13411 // cond: canMergeSym(sym1, sym2) 13412 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13413 for { 13414 x := v.AuxInt 13415 sym1 := v.Aux 13416 _ = v.Args[1] 13417 v_0 := v.Args[0] 13418 if v_0.Op != OpAMD64LEAQ2 { 13419 break 13420 } 13421 off := v_0.AuxInt 13422 sym2 := v_0.Aux 13423 _ = v_0.Args[1] 13424 ptr := v_0.Args[0] 13425 idx := v_0.Args[1] 13426 mem := v.Args[1] 13427 if !(canMergeSym(sym1, sym2)) { 13428 break 13429 } 13430 v.reset(OpAMD64MOVWstoreconstidx2) 13431 v.AuxInt = ValAndOff(x).add(off) 13432 v.Aux = mergeSym(sym1, sym2) 13433 v.AddArg(ptr) 13434 v.AddArg(idx) 13435 v.AddArg(mem) 13436 return true 13437 } 13438 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 13439 // cond: 13440 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 13441 for { 13442 x := v.AuxInt 13443 sym := v.Aux 13444 _ = v.Args[1] 13445 v_0 := v.Args[0] 13446 if v_0.Op != OpAMD64ADDQ { 13447 break 13448 } 13449 _ = v_0.Args[1] 13450 ptr := v_0.Args[0] 13451 idx := v_0.Args[1] 13452 mem := v.Args[1] 13453 v.reset(OpAMD64MOVWstoreconstidx1) 13454 v.AuxInt = x 13455 v.Aux = sym 13456 v.AddArg(ptr) 13457 v.AddArg(idx) 13458 v.AddArg(mem) 13459 return true 13460 } 13461 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 13462 // cond: x.Uses == 1 && 
ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13463 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 13464 for { 13465 c := v.AuxInt 13466 s := v.Aux 13467 _ = v.Args[1] 13468 p := v.Args[0] 13469 x := v.Args[1] 13470 if x.Op != OpAMD64MOVWstoreconst { 13471 break 13472 } 13473 a := x.AuxInt 13474 if x.Aux != s { 13475 break 13476 } 13477 _ = x.Args[1] 13478 if p != x.Args[0] { 13479 break 13480 } 13481 mem := x.Args[1] 13482 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13483 break 13484 } 13485 v.reset(OpAMD64MOVLstoreconst) 13486 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13487 v.Aux = s 13488 v.AddArg(p) 13489 v.AddArg(mem) 13490 return true 13491 } 13492 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 13493 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13494 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13495 for { 13496 sc := v.AuxInt 13497 sym1 := v.Aux 13498 _ = v.Args[1] 13499 v_0 := v.Args[0] 13500 if v_0.Op != OpAMD64LEAL { 13501 break 13502 } 13503 off := v_0.AuxInt 13504 sym2 := v_0.Aux 13505 ptr := v_0.Args[0] 13506 mem := v.Args[1] 13507 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13508 break 13509 } 13510 v.reset(OpAMD64MOVWstoreconst) 13511 v.AuxInt = ValAndOff(sc).add(off) 13512 v.Aux = mergeSym(sym1, sym2) 13513 v.AddArg(ptr) 13514 v.AddArg(mem) 13515 return true 13516 } 13517 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 13518 // cond: ValAndOff(sc).canAdd(off) 13519 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 13520 for { 13521 sc := v.AuxInt 13522 s := v.Aux 13523 _ = v.Args[1] 13524 v_0 := v.Args[0] 13525 if v_0.Op != OpAMD64ADDLconst { 13526 break 13527 } 13528 off := v_0.AuxInt 13529 ptr := v_0.Args[0] 13530 mem := v.Args[1] 13531 if !(ValAndOff(sc).canAdd(off)) { 13532 break 13533 } 13534 v.reset(OpAMD64MOVWstoreconst) 13535 v.AuxInt = ValAndOff(sc).add(off) 13536 v.Aux = s 13537 v.AddArg(ptr) 13538 v.AddArg(mem) 13539 return true 13540 } 13541 return false 13542 } 13543 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { 13544 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 13545 // cond: 13546 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 13547 for { 13548 c := v.AuxInt 13549 sym := v.Aux 13550 _ = v.Args[2] 13551 ptr := v.Args[0] 13552 v_1 := v.Args[1] 13553 if v_1.Op != OpAMD64SHLQconst { 13554 break 13555 } 13556 if v_1.AuxInt != 1 { 13557 break 13558 } 13559 idx := v_1.Args[0] 13560 mem := v.Args[2] 13561 v.reset(OpAMD64MOVWstoreconstidx2) 13562 v.AuxInt = c 13563 v.Aux = sym 13564 v.AddArg(ptr) 13565 v.AddArg(idx) 13566 v.AddArg(mem) 13567 return true 13568 } 13569 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 13570 // cond: ValAndOff(x).canAdd(c) 13571 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13572 for { 13573 x := v.AuxInt 13574 sym := v.Aux 13575 _ = v.Args[2] 13576 v_0 := v.Args[0] 13577 if v_0.Op != OpAMD64ADDQconst { 13578 break 13579 } 13580 c := v_0.AuxInt 13581 ptr := v_0.Args[0] 13582 idx := v.Args[1] 13583 mem := v.Args[2] 13584 if !(ValAndOff(x).canAdd(c)) { 13585 break 13586 } 13587 v.reset(OpAMD64MOVWstoreconstidx1) 13588 v.AuxInt = ValAndOff(x).add(c) 13589 v.Aux = sym 13590 v.AddArg(ptr) 13591 v.AddArg(idx) 13592 v.AddArg(mem) 13593 return true 
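// Editor's note (hedged): for the *storeconst ops, AuxInt packs both the value
// to store and the offset into one ValAndOff, which is why these rules go
// through canAdd/add rather than plain integer addition. The MOVWstoreconst
// pair-merging rule earlier combines two adjacent 16-bit constant stores into
// one 32-bit store; with illustrative values, a store of 0x1234 at offset 0
// next to a store of 0x5678 at offset 2 becomes one MOVLstoreconst of
// 0x1234&0xffff | 0x5678<<16 = 0x56781234 at offset 0, the little-endian
// concatenation of the two halves.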
13594 } 13595 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 13596 // cond: ValAndOff(x).canAdd(c) 13597 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13598 for { 13599 x := v.AuxInt 13600 sym := v.Aux 13601 _ = v.Args[2] 13602 ptr := v.Args[0] 13603 v_1 := v.Args[1] 13604 if v_1.Op != OpAMD64ADDQconst { 13605 break 13606 } 13607 c := v_1.AuxInt 13608 idx := v_1.Args[0] 13609 mem := v.Args[2] 13610 if !(ValAndOff(x).canAdd(c)) { 13611 break 13612 } 13613 v.reset(OpAMD64MOVWstoreconstidx1) 13614 v.AuxInt = ValAndOff(x).add(c) 13615 v.Aux = sym 13616 v.AddArg(ptr) 13617 v.AddArg(idx) 13618 v.AddArg(mem) 13619 return true 13620 } 13621 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 13622 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13623 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 13624 for { 13625 c := v.AuxInt 13626 s := v.Aux 13627 _ = v.Args[2] 13628 p := v.Args[0] 13629 i := v.Args[1] 13630 x := v.Args[2] 13631 if x.Op != OpAMD64MOVWstoreconstidx1 { 13632 break 13633 } 13634 a := x.AuxInt 13635 if x.Aux != s { 13636 break 13637 } 13638 _ = x.Args[2] 13639 if p != x.Args[0] { 13640 break 13641 } 13642 if i != x.Args[1] { 13643 break 13644 } 13645 mem := x.Args[2] 13646 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13647 break 13648 } 13649 v.reset(OpAMD64MOVLstoreconstidx1) 13650 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13651 v.Aux = s 13652 v.AddArg(p) 13653 v.AddArg(i) 13654 v.AddArg(mem) 13655 return true 13656 } 13657 return false 13658 } 13659 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { 13660 b := v.Block 13661 _ = b 13662 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 13663 // cond: ValAndOff(x).canAdd(c) 13664 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13665 for { 13666 x := v.AuxInt 13667 sym := v.Aux 13668 _ = v.Args[2] 13669 v_0 := v.Args[0] 13670 if v_0.Op != OpAMD64ADDQconst { 13671 break 13672 } 13673 c := v_0.AuxInt 13674 ptr := v_0.Args[0] 13675 idx := v.Args[1] 13676 mem := v.Args[2] 13677 if !(ValAndOff(x).canAdd(c)) { 13678 break 13679 } 13680 v.reset(OpAMD64MOVWstoreconstidx2) 13681 v.AuxInt = ValAndOff(x).add(c) 13682 v.Aux = sym 13683 v.AddArg(ptr) 13684 v.AddArg(idx) 13685 v.AddArg(mem) 13686 return true 13687 } 13688 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 13689 // cond: ValAndOff(x).canAdd(2*c) 13690 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 13691 for { 13692 x := v.AuxInt 13693 sym := v.Aux 13694 _ = v.Args[2] 13695 ptr := v.Args[0] 13696 v_1 := v.Args[1] 13697 if v_1.Op != OpAMD64ADDQconst { 13698 break 13699 } 13700 c := v_1.AuxInt 13701 idx := v_1.Args[0] 13702 mem := v.Args[2] 13703 if !(ValAndOff(x).canAdd(2 * c)) { 13704 break 13705 } 13706 v.reset(OpAMD64MOVWstoreconstidx2) 13707 v.AuxInt = ValAndOff(x).add(2 * c) 13708 v.Aux = sym 13709 v.AddArg(ptr) 13710 v.AddArg(idx) 13711 v.AddArg(mem) 13712 return true 13713 } 13714 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 13715 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13716 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst 
<i.Type> [1] i) mem) 13717 for { 13718 c := v.AuxInt 13719 s := v.Aux 13720 _ = v.Args[2] 13721 p := v.Args[0] 13722 i := v.Args[1] 13723 x := v.Args[2] 13724 if x.Op != OpAMD64MOVWstoreconstidx2 { 13725 break 13726 } 13727 a := x.AuxInt 13728 if x.Aux != s { 13729 break 13730 } 13731 _ = x.Args[2] 13732 if p != x.Args[0] { 13733 break 13734 } 13735 if i != x.Args[1] { 13736 break 13737 } 13738 mem := x.Args[2] 13739 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13740 break 13741 } 13742 v.reset(OpAMD64MOVLstoreconstidx1) 13743 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13744 v.Aux = s 13745 v.AddArg(p) 13746 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 13747 v0.AuxInt = 1 13748 v0.AddArg(i) 13749 v.AddArg(v0) 13750 v.AddArg(mem) 13751 return true 13752 } 13753 return false 13754 } 13755 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { 13756 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 13757 // cond: 13758 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 13759 for { 13760 c := v.AuxInt 13761 sym := v.Aux 13762 _ = v.Args[3] 13763 ptr := v.Args[0] 13764 v_1 := v.Args[1] 13765 if v_1.Op != OpAMD64SHLQconst { 13766 break 13767 } 13768 if v_1.AuxInt != 1 { 13769 break 13770 } 13771 idx := v_1.Args[0] 13772 val := v.Args[2] 13773 mem := v.Args[3] 13774 v.reset(OpAMD64MOVWstoreidx2) 13775 v.AuxInt = c 13776 v.Aux = sym 13777 v.AddArg(ptr) 13778 v.AddArg(idx) 13779 v.AddArg(val) 13780 v.AddArg(mem) 13781 return true 13782 } 13783 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 13784 // cond: is32Bit(c+d) 13785 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 13786 for { 13787 c := v.AuxInt 13788 sym := v.Aux 13789 _ = v.Args[3] 13790 v_0 := v.Args[0] 13791 if v_0.Op != OpAMD64ADDQconst { 13792 break 13793 } 13794 d := v_0.AuxInt 13795 ptr := v_0.Args[0] 13796 idx := v.Args[1] 13797 val := v.Args[2] 13798 mem := v.Args[3] 13799 if !(is32Bit(c + d)) { 13800 break 13801 } 13802 v.reset(OpAMD64MOVWstoreidx1) 13803 v.AuxInt = c + d 13804 v.Aux = sym 13805 v.AddArg(ptr) 13806 v.AddArg(idx) 13807 v.AddArg(val) 13808 v.AddArg(mem) 13809 return true 13810 } 13811 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 13812 // cond: is32Bit(c+d) 13813 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 13814 for { 13815 c := v.AuxInt 13816 sym := v.Aux 13817 _ = v.Args[3] 13818 ptr := v.Args[0] 13819 v_1 := v.Args[1] 13820 if v_1.Op != OpAMD64ADDQconst { 13821 break 13822 } 13823 d := v_1.AuxInt 13824 idx := v_1.Args[0] 13825 val := v.Args[2] 13826 mem := v.Args[3] 13827 if !(is32Bit(c + d)) { 13828 break 13829 } 13830 v.reset(OpAMD64MOVWstoreidx1) 13831 v.AuxInt = c + d 13832 v.Aux = sym 13833 v.AddArg(ptr) 13834 v.AddArg(idx) 13835 v.AddArg(val) 13836 v.AddArg(mem) 13837 return true 13838 } 13839 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 13840 // cond: x.Uses == 1 && clobber(x) 13841 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 13842 for { 13843 i := v.AuxInt 13844 s := v.Aux 13845 _ = v.Args[3] 13846 p := v.Args[0] 13847 idx := v.Args[1] 13848 v_2 := v.Args[2] 13849 if v_2.Op != OpAMD64SHRQconst { 13850 break 13851 } 13852 if v_2.AuxInt != 16 { 13853 break 13854 } 13855 w := v_2.Args[0] 13856 x := v.Args[3] 13857 if x.Op != OpAMD64MOVWstoreidx1 { 13858 break 13859 } 13860 if x.AuxInt != i-2 { 13861 break 13862 } 13863 if x.Aux != s { 13864 break 13865 } 13866 _ = 
x.Args[3] 13867 if p != x.Args[0] { 13868 break 13869 } 13870 if idx != x.Args[1] { 13871 break 13872 } 13873 if w != x.Args[2] { 13874 break 13875 } 13876 mem := x.Args[3] 13877 if !(x.Uses == 1 && clobber(x)) { 13878 break 13879 } 13880 v.reset(OpAMD64MOVLstoreidx1) 13881 v.AuxInt = i - 2 13882 v.Aux = s 13883 v.AddArg(p) 13884 v.AddArg(idx) 13885 v.AddArg(w) 13886 v.AddArg(mem) 13887 return true 13888 } 13889 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 13890 // cond: x.Uses == 1 && clobber(x) 13891 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 13892 for { 13893 i := v.AuxInt 13894 s := v.Aux 13895 _ = v.Args[3] 13896 p := v.Args[0] 13897 idx := v.Args[1] 13898 v_2 := v.Args[2] 13899 if v_2.Op != OpAMD64SHRQconst { 13900 break 13901 } 13902 j := v_2.AuxInt 13903 w := v_2.Args[0] 13904 x := v.Args[3] 13905 if x.Op != OpAMD64MOVWstoreidx1 { 13906 break 13907 } 13908 if x.AuxInt != i-2 { 13909 break 13910 } 13911 if x.Aux != s { 13912 break 13913 } 13914 _ = x.Args[3] 13915 if p != x.Args[0] { 13916 break 13917 } 13918 if idx != x.Args[1] { 13919 break 13920 } 13921 w0 := x.Args[2] 13922 if w0.Op != OpAMD64SHRQconst { 13923 break 13924 } 13925 if w0.AuxInt != j-16 { 13926 break 13927 } 13928 if w != w0.Args[0] { 13929 break 13930 } 13931 mem := x.Args[3] 13932 if !(x.Uses == 1 && clobber(x)) { 13933 break 13934 } 13935 v.reset(OpAMD64MOVLstoreidx1) 13936 v.AuxInt = i - 2 13937 v.Aux = s 13938 v.AddArg(p) 13939 v.AddArg(idx) 13940 v.AddArg(w0) 13941 v.AddArg(mem) 13942 return true 13943 } 13944 return false 13945 } 13946 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { 13947 b := v.Block 13948 _ = b 13949 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 13950 // cond: is32Bit(c+d) 13951 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 13952 for { 13953 c := v.AuxInt 13954 sym := v.Aux 13955 _ = v.Args[3] 13956 v_0 := v.Args[0] 13957 if v_0.Op != OpAMD64ADDQconst { 13958 break 13959 } 13960 d := v_0.AuxInt 13961 ptr := v_0.Args[0] 13962 idx := v.Args[1] 13963 val := v.Args[2] 13964 mem := v.Args[3] 13965 if !(is32Bit(c + d)) { 13966 break 13967 } 13968 v.reset(OpAMD64MOVWstoreidx2) 13969 v.AuxInt = c + d 13970 v.Aux = sym 13971 v.AddArg(ptr) 13972 v.AddArg(idx) 13973 v.AddArg(val) 13974 v.AddArg(mem) 13975 return true 13976 } 13977 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 13978 // cond: is32Bit(c+2*d) 13979 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 13980 for { 13981 c := v.AuxInt 13982 sym := v.Aux 13983 _ = v.Args[3] 13984 ptr := v.Args[0] 13985 v_1 := v.Args[1] 13986 if v_1.Op != OpAMD64ADDQconst { 13987 break 13988 } 13989 d := v_1.AuxInt 13990 idx := v_1.Args[0] 13991 val := v.Args[2] 13992 mem := v.Args[3] 13993 if !(is32Bit(c + 2*d)) { 13994 break 13995 } 13996 v.reset(OpAMD64MOVWstoreidx2) 13997 v.AuxInt = c + 2*d 13998 v.Aux = sym 13999 v.AddArg(ptr) 14000 v.AddArg(idx) 14001 v.AddArg(val) 14002 v.AddArg(mem) 14003 return true 14004 } 14005 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 14006 // cond: x.Uses == 1 && clobber(x) 14007 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 14008 for { 14009 i := v.AuxInt 14010 s := v.Aux 14011 _ = v.Args[3] 14012 p := v.Args[0] 14013 idx := v.Args[1] 14014 v_2 := v.Args[2] 14015 if v_2.Op != OpAMD64SHRQconst { 14016 break 14017 } 14018 if v_2.AuxInt != 16 { 14019 break 14020 } 14021 w := v_2.Args[0] 14022 x 
:= v.Args[3] 14023 if x.Op != OpAMD64MOVWstoreidx2 { 14024 break 14025 } 14026 if x.AuxInt != i-2 { 14027 break 14028 } 14029 if x.Aux != s { 14030 break 14031 } 14032 _ = x.Args[3] 14033 if p != x.Args[0] { 14034 break 14035 } 14036 if idx != x.Args[1] { 14037 break 14038 } 14039 if w != x.Args[2] { 14040 break 14041 } 14042 mem := x.Args[3] 14043 if !(x.Uses == 1 && clobber(x)) { 14044 break 14045 } 14046 v.reset(OpAMD64MOVLstoreidx1) 14047 v.AuxInt = i - 2 14048 v.Aux = s 14049 v.AddArg(p) 14050 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 14051 v0.AuxInt = 1 14052 v0.AddArg(idx) 14053 v.AddArg(v0) 14054 v.AddArg(w) 14055 v.AddArg(mem) 14056 return true 14057 } 14058 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 14059 // cond: x.Uses == 1 && clobber(x) 14060 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 14061 for { 14062 i := v.AuxInt 14063 s := v.Aux 14064 _ = v.Args[3] 14065 p := v.Args[0] 14066 idx := v.Args[1] 14067 v_2 := v.Args[2] 14068 if v_2.Op != OpAMD64SHRQconst { 14069 break 14070 } 14071 j := v_2.AuxInt 14072 w := v_2.Args[0] 14073 x := v.Args[3] 14074 if x.Op != OpAMD64MOVWstoreidx2 { 14075 break 14076 } 14077 if x.AuxInt != i-2 { 14078 break 14079 } 14080 if x.Aux != s { 14081 break 14082 } 14083 _ = x.Args[3] 14084 if p != x.Args[0] { 14085 break 14086 } 14087 if idx != x.Args[1] { 14088 break 14089 } 14090 w0 := x.Args[2] 14091 if w0.Op != OpAMD64SHRQconst { 14092 break 14093 } 14094 if w0.AuxInt != j-16 { 14095 break 14096 } 14097 if w != w0.Args[0] { 14098 break 14099 } 14100 mem := x.Args[3] 14101 if !(x.Uses == 1 && clobber(x)) { 14102 break 14103 } 14104 v.reset(OpAMD64MOVLstoreidx1) 14105 v.AuxInt = i - 2 14106 v.Aux = s 14107 v.AddArg(p) 14108 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 14109 v0.AuxInt = 1 14110 v0.AddArg(idx) 14111 v.AddArg(v0) 14112 v.AddArg(w0) 14113 v.AddArg(mem) 14114 return true 14115 } 14116 return false 14117 } 14118 func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { 14119 // match: (MULL x (MOVLconst [c])) 14120 // cond: 14121 // result: (MULLconst [c] x) 14122 for { 14123 _ = v.Args[1] 14124 x := v.Args[0] 14125 v_1 := v.Args[1] 14126 if v_1.Op != OpAMD64MOVLconst { 14127 break 14128 } 14129 c := v_1.AuxInt 14130 v.reset(OpAMD64MULLconst) 14131 v.AuxInt = c 14132 v.AddArg(x) 14133 return true 14134 } 14135 // match: (MULL (MOVLconst [c]) x) 14136 // cond: 14137 // result: (MULLconst [c] x) 14138 for { 14139 _ = v.Args[1] 14140 v_0 := v.Args[0] 14141 if v_0.Op != OpAMD64MOVLconst { 14142 break 14143 } 14144 c := v_0.AuxInt 14145 x := v.Args[1] 14146 v.reset(OpAMD64MULLconst) 14147 v.AuxInt = c 14148 v.AddArg(x) 14149 return true 14150 } 14151 return false 14152 } 14153 func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { 14154 // match: (MULLconst [c] (MULLconst [d] x)) 14155 // cond: 14156 // result: (MULLconst [int64(int32(c * d))] x) 14157 for { 14158 c := v.AuxInt 14159 v_0 := v.Args[0] 14160 if v_0.Op != OpAMD64MULLconst { 14161 break 14162 } 14163 d := v_0.AuxInt 14164 x := v_0.Args[0] 14165 v.reset(OpAMD64MULLconst) 14166 v.AuxInt = int64(int32(c * d)) 14167 v.AddArg(x) 14168 return true 14169 } 14170 // match: (MULLconst [c] (MOVLconst [d])) 14171 // cond: 14172 // result: (MOVLconst [int64(int32(c*d))]) 14173 for { 14174 c := v.AuxInt 14175 v_0 := v.Args[0] 14176 if v_0.Op != OpAMD64MOVLconst { 14177 break 14178 } 14179 d := v_0.AuxInt 14180 v.reset(OpAMD64MOVLconst) 14181 v.AuxInt = int64(int32(c * 
d)) 14182 return true 14183 } 14184 return false 14185 } 14186 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { 14187 // match: (MULQ x (MOVQconst [c])) 14188 // cond: is32Bit(c) 14189 // result: (MULQconst [c] x) 14190 for { 14191 _ = v.Args[1] 14192 x := v.Args[0] 14193 v_1 := v.Args[1] 14194 if v_1.Op != OpAMD64MOVQconst { 14195 break 14196 } 14197 c := v_1.AuxInt 14198 if !(is32Bit(c)) { 14199 break 14200 } 14201 v.reset(OpAMD64MULQconst) 14202 v.AuxInt = c 14203 v.AddArg(x) 14204 return true 14205 } 14206 // match: (MULQ (MOVQconst [c]) x) 14207 // cond: is32Bit(c) 14208 // result: (MULQconst [c] x) 14209 for { 14210 _ = v.Args[1] 14211 v_0 := v.Args[0] 14212 if v_0.Op != OpAMD64MOVQconst { 14213 break 14214 } 14215 c := v_0.AuxInt 14216 x := v.Args[1] 14217 if !(is32Bit(c)) { 14218 break 14219 } 14220 v.reset(OpAMD64MULQconst) 14221 v.AuxInt = c 14222 v.AddArg(x) 14223 return true 14224 } 14225 return false 14226 } 14227 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { 14228 b := v.Block 14229 _ = b 14230 // match: (MULQconst [c] (MULQconst [d] x)) 14231 // cond: is32Bit(c*d) 14232 // result: (MULQconst [c * d] x) 14233 for { 14234 c := v.AuxInt 14235 v_0 := v.Args[0] 14236 if v_0.Op != OpAMD64MULQconst { 14237 break 14238 } 14239 d := v_0.AuxInt 14240 x := v_0.Args[0] 14241 if !(is32Bit(c * d)) { 14242 break 14243 } 14244 v.reset(OpAMD64MULQconst) 14245 v.AuxInt = c * d 14246 v.AddArg(x) 14247 return true 14248 } 14249 // match: (MULQconst [-1] x) 14250 // cond: 14251 // result: (NEGQ x) 14252 for { 14253 if v.AuxInt != -1 { 14254 break 14255 } 14256 x := v.Args[0] 14257 v.reset(OpAMD64NEGQ) 14258 v.AddArg(x) 14259 return true 14260 } 14261 // match: (MULQconst [0] _) 14262 // cond: 14263 // result: (MOVQconst [0]) 14264 for { 14265 if v.AuxInt != 0 { 14266 break 14267 } 14268 v.reset(OpAMD64MOVQconst) 14269 v.AuxInt = 0 14270 return true 14271 } 14272 // match: (MULQconst [1] x) 14273 // cond: 14274 // result: x 14275 for { 14276 if v.AuxInt != 1 { 14277 break 14278 } 14279 x := v.Args[0] 14280 v.reset(OpCopy) 14281 v.Type = x.Type 14282 v.AddArg(x) 14283 return true 14284 } 14285 // match: (MULQconst [3] x) 14286 // cond: 14287 // result: (LEAQ2 x x) 14288 for { 14289 if v.AuxInt != 3 { 14290 break 14291 } 14292 x := v.Args[0] 14293 v.reset(OpAMD64LEAQ2) 14294 v.AddArg(x) 14295 v.AddArg(x) 14296 return true 14297 } 14298 // match: (MULQconst [5] x) 14299 // cond: 14300 // result: (LEAQ4 x x) 14301 for { 14302 if v.AuxInt != 5 { 14303 break 14304 } 14305 x := v.Args[0] 14306 v.reset(OpAMD64LEAQ4) 14307 v.AddArg(x) 14308 v.AddArg(x) 14309 return true 14310 } 14311 // match: (MULQconst [7] x) 14312 // cond: 14313 // result: (LEAQ8 (NEGQ <v.Type> x) x) 14314 for { 14315 if v.AuxInt != 7 { 14316 break 14317 } 14318 x := v.Args[0] 14319 v.reset(OpAMD64LEAQ8) 14320 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) 14321 v0.AddArg(x) 14322 v.AddArg(v0) 14323 v.AddArg(x) 14324 return true 14325 } 14326 // match: (MULQconst [9] x) 14327 // cond: 14328 // result: (LEAQ8 x x) 14329 for { 14330 if v.AuxInt != 9 { 14331 break 14332 } 14333 x := v.Args[0] 14334 v.reset(OpAMD64LEAQ8) 14335 v.AddArg(x) 14336 v.AddArg(x) 14337 return true 14338 } 14339 // match: (MULQconst [11] x) 14340 // cond: 14341 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 14342 for { 14343 if v.AuxInt != 11 { 14344 break 14345 } 14346 x := v.Args[0] 14347 v.reset(OpAMD64LEAQ2) 14348 v.AddArg(x) 14349 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14350 v0.AddArg(x) 14351 v0.AddArg(x) 14352 v.AddArg(v0) 14353 return 
true 14354 } 14355 // match: (MULQconst [13] x) 14356 // cond: 14357 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 14358 for { 14359 if v.AuxInt != 13 { 14360 break 14361 } 14362 x := v.Args[0] 14363 v.reset(OpAMD64LEAQ4) 14364 v.AddArg(x) 14365 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14366 v0.AddArg(x) 14367 v0.AddArg(x) 14368 v.AddArg(v0) 14369 return true 14370 } 14371 return false 14372 } 14373 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { 14374 b := v.Block 14375 _ = b 14376 // match: (MULQconst [21] x) 14377 // cond: 14378 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 14379 for { 14380 if v.AuxInt != 21 { 14381 break 14382 } 14383 x := v.Args[0] 14384 v.reset(OpAMD64LEAQ4) 14385 v.AddArg(x) 14386 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14387 v0.AddArg(x) 14388 v0.AddArg(x) 14389 v.AddArg(v0) 14390 return true 14391 } 14392 // match: (MULQconst [25] x) 14393 // cond: 14394 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 14395 for { 14396 if v.AuxInt != 25 { 14397 break 14398 } 14399 x := v.Args[0] 14400 v.reset(OpAMD64LEAQ8) 14401 v.AddArg(x) 14402 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14403 v0.AddArg(x) 14404 v0.AddArg(x) 14405 v.AddArg(v0) 14406 return true 14407 } 14408 // match: (MULQconst [37] x) 14409 // cond: 14410 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 14411 for { 14412 if v.AuxInt != 37 { 14413 break 14414 } 14415 x := v.Args[0] 14416 v.reset(OpAMD64LEAQ4) 14417 v.AddArg(x) 14418 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14419 v0.AddArg(x) 14420 v0.AddArg(x) 14421 v.AddArg(v0) 14422 return true 14423 } 14424 // match: (MULQconst [41] x) 14425 // cond: 14426 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 14427 for { 14428 if v.AuxInt != 41 { 14429 break 14430 } 14431 x := v.Args[0] 14432 v.reset(OpAMD64LEAQ8) 14433 v.AddArg(x) 14434 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14435 v0.AddArg(x) 14436 v0.AddArg(x) 14437 v.AddArg(v0) 14438 return true 14439 } 14440 // match: (MULQconst [73] x) 14441 // cond: 14442 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 14443 for { 14444 if v.AuxInt != 73 { 14445 break 14446 } 14447 x := v.Args[0] 14448 v.reset(OpAMD64LEAQ8) 14449 v.AddArg(x) 14450 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14451 v0.AddArg(x) 14452 v0.AddArg(x) 14453 v.AddArg(v0) 14454 return true 14455 } 14456 // match: (MULQconst [c] x) 14457 // cond: isPowerOfTwo(c+1) && c >= 15 14458 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 14459 for { 14460 c := v.AuxInt 14461 x := v.Args[0] 14462 if !(isPowerOfTwo(c+1) && c >= 15) { 14463 break 14464 } 14465 v.reset(OpAMD64SUBQ) 14466 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14467 v0.AuxInt = log2(c + 1) 14468 v0.AddArg(x) 14469 v.AddArg(v0) 14470 v.AddArg(x) 14471 return true 14472 } 14473 // match: (MULQconst [c] x) 14474 // cond: isPowerOfTwo(c-1) && c >= 17 14475 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 14476 for { 14477 c := v.AuxInt 14478 x := v.Args[0] 14479 if !(isPowerOfTwo(c-1) && c >= 17) { 14480 break 14481 } 14482 v.reset(OpAMD64LEAQ1) 14483 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14484 v0.AuxInt = log2(c - 1) 14485 v0.AddArg(x) 14486 v.AddArg(v0) 14487 v.AddArg(x) 14488 return true 14489 } 14490 // match: (MULQconst [c] x) 14491 // cond: isPowerOfTwo(c-2) && c >= 34 14492 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 14493 for { 14494 c := v.AuxInt 14495 x := v.Args[0] 14496 if !(isPowerOfTwo(c-2) && c >= 34) { 14497 break 14498 } 14499 v.reset(OpAMD64LEAQ2) 14500 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14501 v0.AuxInt = 
log2(c - 2) 14502 v0.AddArg(x) 14503 v.AddArg(v0) 14504 v.AddArg(x) 14505 return true 14506 } 14507 // match: (MULQconst [c] x) 14508 // cond: isPowerOfTwo(c-4) && c >= 68 14509 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 14510 for { 14511 c := v.AuxInt 14512 x := v.Args[0] 14513 if !(isPowerOfTwo(c-4) && c >= 68) { 14514 break 14515 } 14516 v.reset(OpAMD64LEAQ4) 14517 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14518 v0.AuxInt = log2(c - 4) 14519 v0.AddArg(x) 14520 v.AddArg(v0) 14521 v.AddArg(x) 14522 return true 14523 } 14524 // match: (MULQconst [c] x) 14525 // cond: isPowerOfTwo(c-8) && c >= 136 14526 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 14527 for { 14528 c := v.AuxInt 14529 x := v.Args[0] 14530 if !(isPowerOfTwo(c-8) && c >= 136) { 14531 break 14532 } 14533 v.reset(OpAMD64LEAQ8) 14534 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14535 v0.AuxInt = log2(c - 8) 14536 v0.AddArg(x) 14537 v.AddArg(v0) 14538 v.AddArg(x) 14539 return true 14540 } 14541 return false 14542 } 14543 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { 14544 b := v.Block 14545 _ = b 14546 // match: (MULQconst [c] x) 14547 // cond: c%3 == 0 && isPowerOfTwo(c/3) 14548 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 14549 for { 14550 c := v.AuxInt 14551 x := v.Args[0] 14552 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 14553 break 14554 } 14555 v.reset(OpAMD64SHLQconst) 14556 v.AuxInt = log2(c / 3) 14557 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14558 v0.AddArg(x) 14559 v0.AddArg(x) 14560 v.AddArg(v0) 14561 return true 14562 } 14563 // match: (MULQconst [c] x) 14564 // cond: c%5 == 0 && isPowerOfTwo(c/5) 14565 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 14566 for { 14567 c := v.AuxInt 14568 x := v.Args[0] 14569 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 14570 break 14571 } 14572 v.reset(OpAMD64SHLQconst) 14573 v.AuxInt = log2(c / 5) 14574 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14575 v0.AddArg(x) 14576 v0.AddArg(x) 14577 v.AddArg(v0) 14578 return true 14579 } 14580 // match: (MULQconst [c] x) 14581 // cond: c%9 == 0 && isPowerOfTwo(c/9) 14582 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 14583 for { 14584 c := v.AuxInt 14585 x := v.Args[0] 14586 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 14587 break 14588 } 14589 v.reset(OpAMD64SHLQconst) 14590 v.AuxInt = log2(c / 9) 14591 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14592 v0.AddArg(x) 14593 v0.AddArg(x) 14594 v.AddArg(v0) 14595 return true 14596 } 14597 // match: (MULQconst [c] (MOVQconst [d])) 14598 // cond: 14599 // result: (MOVQconst [c*d]) 14600 for { 14601 c := v.AuxInt 14602 v_0 := v.Args[0] 14603 if v_0.Op != OpAMD64MOVQconst { 14604 break 14605 } 14606 d := v_0.AuxInt 14607 v.reset(OpAMD64MOVQconst) 14608 v.AuxInt = c * d 14609 return true 14610 } 14611 return false 14612 } 14613 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 14614 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) 14615 // cond: canMergeLoad(v, l, x) && clobber(l) 14616 // result: (MULSDmem x [off] {sym} ptr mem) 14617 for { 14618 _ = v.Args[1] 14619 x := v.Args[0] 14620 l := v.Args[1] 14621 if l.Op != OpAMD64MOVSDload { 14622 break 14623 } 14624 off := l.AuxInt 14625 sym := l.Aux 14626 _ = l.Args[1] 14627 ptr := l.Args[0] 14628 mem := l.Args[1] 14629 if !(canMergeLoad(v, l, x) && clobber(l)) { 14630 break 14631 } 14632 v.reset(OpAMD64MULSDmem) 14633 v.AuxInt = off 14634 v.Aux = sym 14635 v.AddArg(x) 14636 v.AddArg(ptr) 14637 v.AddArg(mem) 14638 return true 14639 } 14640 // match: (MULSD l:(MOVSDload 
[off] {sym} ptr mem) x) 14641 // cond: canMergeLoad(v, l, x) && clobber(l) 14642 // result: (MULSDmem x [off] {sym} ptr mem) 14643 for { 14644 _ = v.Args[1] 14645 l := v.Args[0] 14646 if l.Op != OpAMD64MOVSDload { 14647 break 14648 } 14649 off := l.AuxInt 14650 sym := l.Aux 14651 _ = l.Args[1] 14652 ptr := l.Args[0] 14653 mem := l.Args[1] 14654 x := v.Args[1] 14655 if !(canMergeLoad(v, l, x) && clobber(l)) { 14656 break 14657 } 14658 v.reset(OpAMD64MULSDmem) 14659 v.AuxInt = off 14660 v.Aux = sym 14661 v.AddArg(x) 14662 v.AddArg(ptr) 14663 v.AddArg(mem) 14664 return true 14665 } 14666 return false 14667 } 14668 func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool { 14669 b := v.Block 14670 _ = b 14671 typ := &b.Func.Config.Types 14672 _ = typ 14673 // match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 14674 // cond: 14675 // result: (MULSD x (MOVQi2f y)) 14676 for { 14677 off := v.AuxInt 14678 sym := v.Aux 14679 _ = v.Args[2] 14680 x := v.Args[0] 14681 ptr := v.Args[1] 14682 v_2 := v.Args[2] 14683 if v_2.Op != OpAMD64MOVQstore { 14684 break 14685 } 14686 if v_2.AuxInt != off { 14687 break 14688 } 14689 if v_2.Aux != sym { 14690 break 14691 } 14692 _ = v_2.Args[2] 14693 if ptr != v_2.Args[0] { 14694 break 14695 } 14696 y := v_2.Args[1] 14697 v.reset(OpAMD64MULSD) 14698 v.AddArg(x) 14699 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 14700 v0.AddArg(y) 14701 v.AddArg(v0) 14702 return true 14703 } 14704 return false 14705 } 14706 func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { 14707 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) 14708 // cond: canMergeLoad(v, l, x) && clobber(l) 14709 // result: (MULSSmem x [off] {sym} ptr mem) 14710 for { 14711 _ = v.Args[1] 14712 x := v.Args[0] 14713 l := v.Args[1] 14714 if l.Op != OpAMD64MOVSSload { 14715 break 14716 } 14717 off := l.AuxInt 14718 sym := l.Aux 14719 _ = l.Args[1] 14720 ptr := l.Args[0] 14721 mem := l.Args[1] 14722 if !(canMergeLoad(v, l, x) && clobber(l)) { 14723 break 14724 } 14725 v.reset(OpAMD64MULSSmem) 14726 v.AuxInt = off 14727 v.Aux = sym 14728 v.AddArg(x) 14729 v.AddArg(ptr) 14730 v.AddArg(mem) 14731 return true 14732 } 14733 // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) 14734 // cond: canMergeLoad(v, l, x) && clobber(l) 14735 // result: (MULSSmem x [off] {sym} ptr mem) 14736 for { 14737 _ = v.Args[1] 14738 l := v.Args[0] 14739 if l.Op != OpAMD64MOVSSload { 14740 break 14741 } 14742 off := l.AuxInt 14743 sym := l.Aux 14744 _ = l.Args[1] 14745 ptr := l.Args[0] 14746 mem := l.Args[1] 14747 x := v.Args[1] 14748 if !(canMergeLoad(v, l, x) && clobber(l)) { 14749 break 14750 } 14751 v.reset(OpAMD64MULSSmem) 14752 v.AuxInt = off 14753 v.Aux = sym 14754 v.AddArg(x) 14755 v.AddArg(ptr) 14756 v.AddArg(mem) 14757 return true 14758 } 14759 return false 14760 } 14761 func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool { 14762 b := v.Block 14763 _ = b 14764 typ := &b.Func.Config.Types 14765 _ = typ 14766 // match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 14767 // cond: 14768 // result: (MULSS x (MOVLi2f y)) 14769 for { 14770 off := v.AuxInt 14771 sym := v.Aux 14772 _ = v.Args[2] 14773 x := v.Args[0] 14774 ptr := v.Args[1] 14775 v_2 := v.Args[2] 14776 if v_2.Op != OpAMD64MOVLstore { 14777 break 14778 } 14779 if v_2.AuxInt != off { 14780 break 14781 } 14782 if v_2.Aux != sym { 14783 break 14784 } 14785 _ = v_2.Args[2] 14786 if ptr != v_2.Args[0] { 14787 break 14788 } 14789 y := v_2.Args[1] 14790 v.reset(OpAMD64MULSS) 14791 v.AddArg(x) 14792 v0 := b.NewValue0(v.Pos, 
OpAMD64MOVLi2f, typ.Float32) 14793 v0.AddArg(y) 14794 v.AddArg(v0) 14795 return true 14796 } 14797 return false 14798 } 14799 func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { 14800 // match: (NEGL (MOVLconst [c])) 14801 // cond: 14802 // result: (MOVLconst [int64(int32(-c))]) 14803 for { 14804 v_0 := v.Args[0] 14805 if v_0.Op != OpAMD64MOVLconst { 14806 break 14807 } 14808 c := v_0.AuxInt 14809 v.reset(OpAMD64MOVLconst) 14810 v.AuxInt = int64(int32(-c)) 14811 return true 14812 } 14813 return false 14814 } 14815 func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { 14816 // match: (NEGQ (MOVQconst [c])) 14817 // cond: 14818 // result: (MOVQconst [-c]) 14819 for { 14820 v_0 := v.Args[0] 14821 if v_0.Op != OpAMD64MOVQconst { 14822 break 14823 } 14824 c := v_0.AuxInt 14825 v.reset(OpAMD64MOVQconst) 14826 v.AuxInt = -c 14827 return true 14828 } 14829 // match: (NEGQ (ADDQconst [c] (NEGQ x))) 14830 // cond: c != -(1<<31) 14831 // result: (ADDQconst [-c] x) 14832 for { 14833 v_0 := v.Args[0] 14834 if v_0.Op != OpAMD64ADDQconst { 14835 break 14836 } 14837 c := v_0.AuxInt 14838 v_0_0 := v_0.Args[0] 14839 if v_0_0.Op != OpAMD64NEGQ { 14840 break 14841 } 14842 x := v_0_0.Args[0] 14843 if !(c != -(1 << 31)) { 14844 break 14845 } 14846 v.reset(OpAMD64ADDQconst) 14847 v.AuxInt = -c 14848 v.AddArg(x) 14849 return true 14850 } 14851 return false 14852 } 14853 func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool { 14854 // match: (NOTL (MOVLconst [c])) 14855 // cond: 14856 // result: (MOVLconst [^c]) 14857 for { 14858 v_0 := v.Args[0] 14859 if v_0.Op != OpAMD64MOVLconst { 14860 break 14861 } 14862 c := v_0.AuxInt 14863 v.reset(OpAMD64MOVLconst) 14864 v.AuxInt = ^c 14865 return true 14866 } 14867 return false 14868 } 14869 func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool { 14870 // match: (NOTQ (MOVQconst [c])) 14871 // cond: 14872 // result: (MOVQconst [^c]) 14873 for { 14874 v_0 := v.Args[0] 14875 if v_0.Op != OpAMD64MOVQconst { 14876 break 14877 } 14878 c := v_0.AuxInt 14879 v.reset(OpAMD64MOVQconst) 14880 v.AuxInt = ^c 14881 return true 14882 } 14883 return false 14884 } 14885 func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { 14886 // match: (ORL x (MOVLconst [c])) 14887 // cond: 14888 // result: (ORLconst [c] x) 14889 for { 14890 _ = v.Args[1] 14891 x := v.Args[0] 14892 v_1 := v.Args[1] 14893 if v_1.Op != OpAMD64MOVLconst { 14894 break 14895 } 14896 c := v_1.AuxInt 14897 v.reset(OpAMD64ORLconst) 14898 v.AuxInt = c 14899 v.AddArg(x) 14900 return true 14901 } 14902 // match: (ORL (MOVLconst [c]) x) 14903 // cond: 14904 // result: (ORLconst [c] x) 14905 for { 14906 _ = v.Args[1] 14907 v_0 := v.Args[0] 14908 if v_0.Op != OpAMD64MOVLconst { 14909 break 14910 } 14911 c := v_0.AuxInt 14912 x := v.Args[1] 14913 v.reset(OpAMD64ORLconst) 14914 v.AuxInt = c 14915 v.AddArg(x) 14916 return true 14917 } 14918 // match: (ORL (SHLLconst x [c]) (SHRLconst x [d])) 14919 // cond: d==32-c 14920 // result: (ROLLconst x [c]) 14921 for { 14922 _ = v.Args[1] 14923 v_0 := v.Args[0] 14924 if v_0.Op != OpAMD64SHLLconst { 14925 break 14926 } 14927 c := v_0.AuxInt 14928 x := v_0.Args[0] 14929 v_1 := v.Args[1] 14930 if v_1.Op != OpAMD64SHRLconst { 14931 break 14932 } 14933 d := v_1.AuxInt 14934 if x != v_1.Args[0] { 14935 break 14936 } 14937 if !(d == 32-c) { 14938 break 14939 } 14940 v.reset(OpAMD64ROLLconst) 14941 v.AuxInt = c 14942 v.AddArg(x) 14943 return true 14944 } 14945 // match: (ORL (SHRLconst x [d]) (SHLLconst x [c])) 14946 // cond: d==32-c 14947 // result: (ROLLconst x [c]) 14948 for { 14949 _ = v.Args[1] 
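// Editor's note (hedged): this is the commuted form of the previous rule;
// x<<c | x>>(32-c) in either operand order is a 32-bit left rotate, hence
// (ROLLconst x [c]). The 16- and 8-bit variants that follow additionally
// require t.Size() == 2 or 1 and c below the width, so SHRWconst/SHRBconst
// shift pairs only become ROLWconst/ROLBconst at sub-word widths.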
14950 v_0 := v.Args[0] 14951 if v_0.Op != OpAMD64SHRLconst { 14952 break 14953 } 14954 d := v_0.AuxInt 14955 x := v_0.Args[0] 14956 v_1 := v.Args[1] 14957 if v_1.Op != OpAMD64SHLLconst { 14958 break 14959 } 14960 c := v_1.AuxInt 14961 if x != v_1.Args[0] { 14962 break 14963 } 14964 if !(d == 32-c) { 14965 break 14966 } 14967 v.reset(OpAMD64ROLLconst) 14968 v.AuxInt = c 14969 v.AddArg(x) 14970 return true 14971 } 14972 // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 14973 // cond: d==16-c && c < 16 && t.Size() == 2 14974 // result: (ROLWconst x [c]) 14975 for { 14976 t := v.Type 14977 _ = v.Args[1] 14978 v_0 := v.Args[0] 14979 if v_0.Op != OpAMD64SHLLconst { 14980 break 14981 } 14982 c := v_0.AuxInt 14983 x := v_0.Args[0] 14984 v_1 := v.Args[1] 14985 if v_1.Op != OpAMD64SHRWconst { 14986 break 14987 } 14988 d := v_1.AuxInt 14989 if x != v_1.Args[0] { 14990 break 14991 } 14992 if !(d == 16-c && c < 16 && t.Size() == 2) { 14993 break 14994 } 14995 v.reset(OpAMD64ROLWconst) 14996 v.AuxInt = c 14997 v.AddArg(x) 14998 return true 14999 } 15000 // match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 15001 // cond: d==16-c && c < 16 && t.Size() == 2 15002 // result: (ROLWconst x [c]) 15003 for { 15004 t := v.Type 15005 _ = v.Args[1] 15006 v_0 := v.Args[0] 15007 if v_0.Op != OpAMD64SHRWconst { 15008 break 15009 } 15010 d := v_0.AuxInt 15011 x := v_0.Args[0] 15012 v_1 := v.Args[1] 15013 if v_1.Op != OpAMD64SHLLconst { 15014 break 15015 } 15016 c := v_1.AuxInt 15017 if x != v_1.Args[0] { 15018 break 15019 } 15020 if !(d == 16-c && c < 16 && t.Size() == 2) { 15021 break 15022 } 15023 v.reset(OpAMD64ROLWconst) 15024 v.AuxInt = c 15025 v.AddArg(x) 15026 return true 15027 } 15028 // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 15029 // cond: d==8-c && c < 8 && t.Size() == 1 15030 // result: (ROLBconst x [c]) 15031 for { 15032 t := v.Type 15033 _ = v.Args[1] 15034 v_0 := v.Args[0] 15035 if v_0.Op != OpAMD64SHLLconst { 15036 break 15037 } 15038 c := v_0.AuxInt 15039 x := v_0.Args[0] 15040 v_1 := v.Args[1] 15041 if v_1.Op != OpAMD64SHRBconst { 15042 break 15043 } 15044 d := v_1.AuxInt 15045 if x != v_1.Args[0] { 15046 break 15047 } 15048 if !(d == 8-c && c < 8 && t.Size() == 1) { 15049 break 15050 } 15051 v.reset(OpAMD64ROLBconst) 15052 v.AuxInt = c 15053 v.AddArg(x) 15054 return true 15055 } 15056 // match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 15057 // cond: d==8-c && c < 8 && t.Size() == 1 15058 // result: (ROLBconst x [c]) 15059 for { 15060 t := v.Type 15061 _ = v.Args[1] 15062 v_0 := v.Args[0] 15063 if v_0.Op != OpAMD64SHRBconst { 15064 break 15065 } 15066 d := v_0.AuxInt 15067 x := v_0.Args[0] 15068 v_1 := v.Args[1] 15069 if v_1.Op != OpAMD64SHLLconst { 15070 break 15071 } 15072 c := v_1.AuxInt 15073 if x != v_1.Args[0] { 15074 break 15075 } 15076 if !(d == 8-c && c < 8 && t.Size() == 1) { 15077 break 15078 } 15079 v.reset(OpAMD64ROLBconst) 15080 v.AuxInt = c 15081 v.AddArg(x) 15082 return true 15083 } 15084 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 15085 // cond: 15086 // result: (ROLL x y) 15087 for { 15088 _ = v.Args[1] 15089 v_0 := v.Args[0] 15090 if v_0.Op != OpAMD64SHLL { 15091 break 15092 } 15093 _ = v_0.Args[1] 15094 x := v_0.Args[0] 15095 y := v_0.Args[1] 15096 v_1 := v.Args[1] 15097 if v_1.Op != OpAMD64ANDL { 15098 break 15099 } 15100 _ = v_1.Args[1] 15101 v_1_0 := v_1.Args[0] 15102 if v_1_0.Op != OpAMD64SHRL { 15103 break 15104 } 15105 _ = v_1_0.Args[1] 15106 if x != v_1_0.Args[0] 
{ 15107 break 15108 } 15109 v_1_0_1 := v_1_0.Args[1] 15110 if v_1_0_1.Op != OpAMD64NEGQ { 15111 break 15112 } 15113 if y != v_1_0_1.Args[0] { 15114 break 15115 } 15116 v_1_1 := v_1.Args[1] 15117 if v_1_1.Op != OpAMD64SBBLcarrymask { 15118 break 15119 } 15120 v_1_1_0 := v_1_1.Args[0] 15121 if v_1_1_0.Op != OpAMD64CMPQconst { 15122 break 15123 } 15124 if v_1_1_0.AuxInt != 32 { 15125 break 15126 } 15127 v_1_1_0_0 := v_1_1_0.Args[0] 15128 if v_1_1_0_0.Op != OpAMD64NEGQ { 15129 break 15130 } 15131 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 15132 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 15133 break 15134 } 15135 if v_1_1_0_0_0.AuxInt != -32 { 15136 break 15137 } 15138 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 15139 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 15140 break 15141 } 15142 if v_1_1_0_0_0_0.AuxInt != 31 { 15143 break 15144 } 15145 if y != v_1_1_0_0_0_0.Args[0] { 15146 break 15147 } 15148 v.reset(OpAMD64ROLL) 15149 v.AddArg(x) 15150 v.AddArg(y) 15151 return true 15152 } 15153 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y)))) 15154 // cond: 15155 // result: (ROLL x y) 15156 for { 15157 _ = v.Args[1] 15158 v_0 := v.Args[0] 15159 if v_0.Op != OpAMD64SHLL { 15160 break 15161 } 15162 _ = v_0.Args[1] 15163 x := v_0.Args[0] 15164 y := v_0.Args[1] 15165 v_1 := v.Args[1] 15166 if v_1.Op != OpAMD64ANDL { 15167 break 15168 } 15169 _ = v_1.Args[1] 15170 v_1_0 := v_1.Args[0] 15171 if v_1_0.Op != OpAMD64SBBLcarrymask { 15172 break 15173 } 15174 v_1_0_0 := v_1_0.Args[0] 15175 if v_1_0_0.Op != OpAMD64CMPQconst { 15176 break 15177 } 15178 if v_1_0_0.AuxInt != 32 { 15179 break 15180 } 15181 v_1_0_0_0 := v_1_0_0.Args[0] 15182 if v_1_0_0_0.Op != OpAMD64NEGQ { 15183 break 15184 } 15185 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 15186 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 15187 break 15188 } 15189 if v_1_0_0_0_0.AuxInt != -32 { 15190 break 15191 } 15192 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 15193 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 15194 break 15195 } 15196 if v_1_0_0_0_0_0.AuxInt != 31 { 15197 break 15198 } 15199 if y != v_1_0_0_0_0_0.Args[0] { 15200 break 15201 } 15202 v_1_1 := v_1.Args[1] 15203 if v_1_1.Op != OpAMD64SHRL { 15204 break 15205 } 15206 _ = v_1_1.Args[1] 15207 if x != v_1_1.Args[0] { 15208 break 15209 } 15210 v_1_1_1 := v_1_1.Args[1] 15211 if v_1_1_1.Op != OpAMD64NEGQ { 15212 break 15213 } 15214 if y != v_1_1_1.Args[0] { 15215 break 15216 } 15217 v.reset(OpAMD64ROLL) 15218 v.AddArg(x) 15219 v.AddArg(y) 15220 return true 15221 } 15222 return false 15223 } 15224 func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { 15225 // match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y)) 15226 // cond: 15227 // result: (ROLL x y) 15228 for { 15229 _ = v.Args[1] 15230 v_0 := v.Args[0] 15231 if v_0.Op != OpAMD64ANDL { 15232 break 15233 } 15234 _ = v_0.Args[1] 15235 v_0_0 := v_0.Args[0] 15236 if v_0_0.Op != OpAMD64SHRL { 15237 break 15238 } 15239 _ = v_0_0.Args[1] 15240 x := v_0_0.Args[0] 15241 v_0_0_1 := v_0_0.Args[1] 15242 if v_0_0_1.Op != OpAMD64NEGQ { 15243 break 15244 } 15245 y := v_0_0_1.Args[0] 15246 v_0_1 := v_0.Args[1] 15247 if v_0_1.Op != OpAMD64SBBLcarrymask { 15248 break 15249 } 15250 v_0_1_0 := v_0_1.Args[0] 15251 if v_0_1_0.Op != OpAMD64CMPQconst { 15252 break 15253 } 15254 if v_0_1_0.AuxInt != 32 { 15255 break 15256 } 15257 v_0_1_0_0 := v_0_1_0.Args[0] 15258 if v_0_1_0_0.Op != OpAMD64NEGQ { 15259 break 15260 } 15261 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 15262 if 
v_0_1_0_0_0.Op != OpAMD64ADDQconst { 15263 break 15264 } 15265 if v_0_1_0_0_0.AuxInt != -32 { 15266 break 15267 } 15268 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 15269 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 15270 break 15271 } 15272 if v_0_1_0_0_0_0.AuxInt != 31 { 15273 break 15274 } 15275 if y != v_0_1_0_0_0_0.Args[0] { 15276 break 15277 } 15278 v_1 := v.Args[1] 15279 if v_1.Op != OpAMD64SHLL { 15280 break 15281 } 15282 _ = v_1.Args[1] 15283 if x != v_1.Args[0] { 15284 break 15285 } 15286 if y != v_1.Args[1] { 15287 break 15288 } 15289 v.reset(OpAMD64ROLL) 15290 v.AddArg(x) 15291 v.AddArg(y) 15292 return true 15293 } 15294 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y)) 15295 // cond: 15296 // result: (ROLL x y) 15297 for { 15298 _ = v.Args[1] 15299 v_0 := v.Args[0] 15300 if v_0.Op != OpAMD64ANDL { 15301 break 15302 } 15303 _ = v_0.Args[1] 15304 v_0_0 := v_0.Args[0] 15305 if v_0_0.Op != OpAMD64SBBLcarrymask { 15306 break 15307 } 15308 v_0_0_0 := v_0_0.Args[0] 15309 if v_0_0_0.Op != OpAMD64CMPQconst { 15310 break 15311 } 15312 if v_0_0_0.AuxInt != 32 { 15313 break 15314 } 15315 v_0_0_0_0 := v_0_0_0.Args[0] 15316 if v_0_0_0_0.Op != OpAMD64NEGQ { 15317 break 15318 } 15319 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 15320 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 15321 break 15322 } 15323 if v_0_0_0_0_0.AuxInt != -32 { 15324 break 15325 } 15326 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 15327 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 15328 break 15329 } 15330 if v_0_0_0_0_0_0.AuxInt != 31 { 15331 break 15332 } 15333 y := v_0_0_0_0_0_0.Args[0] 15334 v_0_1 := v_0.Args[1] 15335 if v_0_1.Op != OpAMD64SHRL { 15336 break 15337 } 15338 _ = v_0_1.Args[1] 15339 x := v_0_1.Args[0] 15340 v_0_1_1 := v_0_1.Args[1] 15341 if v_0_1_1.Op != OpAMD64NEGQ { 15342 break 15343 } 15344 if y != v_0_1_1.Args[0] { 15345 break 15346 } 15347 v_1 := v.Args[1] 15348 if v_1.Op != OpAMD64SHLL { 15349 break 15350 } 15351 _ = v_1.Args[1] 15352 if x != v_1.Args[0] { 15353 break 15354 } 15355 if y != v_1.Args[1] { 15356 break 15357 } 15358 v.reset(OpAMD64ROLL) 15359 v.AddArg(x) 15360 v.AddArg(y) 15361 return true 15362 } 15363 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 15364 // cond: 15365 // result: (ROLL x y) 15366 for { 15367 _ = v.Args[1] 15368 v_0 := v.Args[0] 15369 if v_0.Op != OpAMD64SHLL { 15370 break 15371 } 15372 _ = v_0.Args[1] 15373 x := v_0.Args[0] 15374 y := v_0.Args[1] 15375 v_1 := v.Args[1] 15376 if v_1.Op != OpAMD64ANDL { 15377 break 15378 } 15379 _ = v_1.Args[1] 15380 v_1_0 := v_1.Args[0] 15381 if v_1_0.Op != OpAMD64SHRL { 15382 break 15383 } 15384 _ = v_1_0.Args[1] 15385 if x != v_1_0.Args[0] { 15386 break 15387 } 15388 v_1_0_1 := v_1_0.Args[1] 15389 if v_1_0_1.Op != OpAMD64NEGL { 15390 break 15391 } 15392 if y != v_1_0_1.Args[0] { 15393 break 15394 } 15395 v_1_1 := v_1.Args[1] 15396 if v_1_1.Op != OpAMD64SBBLcarrymask { 15397 break 15398 } 15399 v_1_1_0 := v_1_1.Args[0] 15400 if v_1_1_0.Op != OpAMD64CMPLconst { 15401 break 15402 } 15403 if v_1_1_0.AuxInt != 32 { 15404 break 15405 } 15406 v_1_1_0_0 := v_1_1_0.Args[0] 15407 if v_1_1_0_0.Op != OpAMD64NEGL { 15408 break 15409 } 15410 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 15411 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 15412 break 15413 } 15414 if v_1_1_0_0_0.AuxInt != -32 { 15415 break 15416 } 15417 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 15418 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 15419 break 15420 } 15421 if 
v_1_1_0_0_0_0.AuxInt != 31 { 15422 break 15423 } 15424 if y != v_1_1_0_0_0_0.Args[0] { 15425 break 15426 } 15427 v.reset(OpAMD64ROLL) 15428 v.AddArg(x) 15429 v.AddArg(y) 15430 return true 15431 } 15432 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y)))) 15433 // cond: 15434 // result: (ROLL x y) 15435 for { 15436 _ = v.Args[1] 15437 v_0 := v.Args[0] 15438 if v_0.Op != OpAMD64SHLL { 15439 break 15440 } 15441 _ = v_0.Args[1] 15442 x := v_0.Args[0] 15443 y := v_0.Args[1] 15444 v_1 := v.Args[1] 15445 if v_1.Op != OpAMD64ANDL { 15446 break 15447 } 15448 _ = v_1.Args[1] 15449 v_1_0 := v_1.Args[0] 15450 if v_1_0.Op != OpAMD64SBBLcarrymask { 15451 break 15452 } 15453 v_1_0_0 := v_1_0.Args[0] 15454 if v_1_0_0.Op != OpAMD64CMPLconst { 15455 break 15456 } 15457 if v_1_0_0.AuxInt != 32 { 15458 break 15459 } 15460 v_1_0_0_0 := v_1_0_0.Args[0] 15461 if v_1_0_0_0.Op != OpAMD64NEGL { 15462 break 15463 } 15464 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 15465 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 15466 break 15467 } 15468 if v_1_0_0_0_0.AuxInt != -32 { 15469 break 15470 } 15471 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 15472 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 15473 break 15474 } 15475 if v_1_0_0_0_0_0.AuxInt != 31 { 15476 break 15477 } 15478 if y != v_1_0_0_0_0_0.Args[0] { 15479 break 15480 } 15481 v_1_1 := v_1.Args[1] 15482 if v_1_1.Op != OpAMD64SHRL { 15483 break 15484 } 15485 _ = v_1_1.Args[1] 15486 if x != v_1_1.Args[0] { 15487 break 15488 } 15489 v_1_1_1 := v_1_1.Args[1] 15490 if v_1_1_1.Op != OpAMD64NEGL { 15491 break 15492 } 15493 if y != v_1_1_1.Args[0] { 15494 break 15495 } 15496 v.reset(OpAMD64ROLL) 15497 v.AddArg(x) 15498 v.AddArg(y) 15499 return true 15500 } 15501 // match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y)) 15502 // cond: 15503 // result: (ROLL x y) 15504 for { 15505 _ = v.Args[1] 15506 v_0 := v.Args[0] 15507 if v_0.Op != OpAMD64ANDL { 15508 break 15509 } 15510 _ = v_0.Args[1] 15511 v_0_0 := v_0.Args[0] 15512 if v_0_0.Op != OpAMD64SHRL { 15513 break 15514 } 15515 _ = v_0_0.Args[1] 15516 x := v_0_0.Args[0] 15517 v_0_0_1 := v_0_0.Args[1] 15518 if v_0_0_1.Op != OpAMD64NEGL { 15519 break 15520 } 15521 y := v_0_0_1.Args[0] 15522 v_0_1 := v_0.Args[1] 15523 if v_0_1.Op != OpAMD64SBBLcarrymask { 15524 break 15525 } 15526 v_0_1_0 := v_0_1.Args[0] 15527 if v_0_1_0.Op != OpAMD64CMPLconst { 15528 break 15529 } 15530 if v_0_1_0.AuxInt != 32 { 15531 break 15532 } 15533 v_0_1_0_0 := v_0_1_0.Args[0] 15534 if v_0_1_0_0.Op != OpAMD64NEGL { 15535 break 15536 } 15537 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 15538 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 15539 break 15540 } 15541 if v_0_1_0_0_0.AuxInt != -32 { 15542 break 15543 } 15544 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 15545 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 15546 break 15547 } 15548 if v_0_1_0_0_0_0.AuxInt != 31 { 15549 break 15550 } 15551 if y != v_0_1_0_0_0_0.Args[0] { 15552 break 15553 } 15554 v_1 := v.Args[1] 15555 if v_1.Op != OpAMD64SHLL { 15556 break 15557 } 15558 _ = v_1.Args[1] 15559 if x != v_1.Args[0] { 15560 break 15561 } 15562 if y != v_1.Args[1] { 15563 break 15564 } 15565 v.reset(OpAMD64ROLL) 15566 v.AddArg(x) 15567 v.AddArg(y) 15568 return true 15569 } 15570 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y)) 15571 // cond: 15572 // result: (ROLL x y) 15573 for { 15574 _ = v.Args[1] 15575 v_0 := v.Args[0] 15576 if 
v_0.Op != OpAMD64ANDL { 15577 break 15578 } 15579 _ = v_0.Args[1] 15580 v_0_0 := v_0.Args[0] 15581 if v_0_0.Op != OpAMD64SBBLcarrymask { 15582 break 15583 } 15584 v_0_0_0 := v_0_0.Args[0] 15585 if v_0_0_0.Op != OpAMD64CMPLconst { 15586 break 15587 } 15588 if v_0_0_0.AuxInt != 32 { 15589 break 15590 } 15591 v_0_0_0_0 := v_0_0_0.Args[0] 15592 if v_0_0_0_0.Op != OpAMD64NEGL { 15593 break 15594 } 15595 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 15596 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 15597 break 15598 } 15599 if v_0_0_0_0_0.AuxInt != -32 { 15600 break 15601 } 15602 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 15603 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 15604 break 15605 } 15606 if v_0_0_0_0_0_0.AuxInt != 31 { 15607 break 15608 } 15609 y := v_0_0_0_0_0_0.Args[0] 15610 v_0_1 := v_0.Args[1] 15611 if v_0_1.Op != OpAMD64SHRL { 15612 break 15613 } 15614 _ = v_0_1.Args[1] 15615 x := v_0_1.Args[0] 15616 v_0_1_1 := v_0_1.Args[1] 15617 if v_0_1_1.Op != OpAMD64NEGL { 15618 break 15619 } 15620 if y != v_0_1_1.Args[0] { 15621 break 15622 } 15623 v_1 := v.Args[1] 15624 if v_1.Op != OpAMD64SHLL { 15625 break 15626 } 15627 _ = v_1.Args[1] 15628 if x != v_1.Args[0] { 15629 break 15630 } 15631 if y != v_1.Args[1] { 15632 break 15633 } 15634 v.reset(OpAMD64ROLL) 15635 v.AddArg(x) 15636 v.AddArg(y) 15637 return true 15638 } 15639 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 15640 // cond: 15641 // result: (RORL x y) 15642 for { 15643 _ = v.Args[1] 15644 v_0 := v.Args[0] 15645 if v_0.Op != OpAMD64SHRL { 15646 break 15647 } 15648 _ = v_0.Args[1] 15649 x := v_0.Args[0] 15650 y := v_0.Args[1] 15651 v_1 := v.Args[1] 15652 if v_1.Op != OpAMD64ANDL { 15653 break 15654 } 15655 _ = v_1.Args[1] 15656 v_1_0 := v_1.Args[0] 15657 if v_1_0.Op != OpAMD64SHLL { 15658 break 15659 } 15660 _ = v_1_0.Args[1] 15661 if x != v_1_0.Args[0] { 15662 break 15663 } 15664 v_1_0_1 := v_1_0.Args[1] 15665 if v_1_0_1.Op != OpAMD64NEGQ { 15666 break 15667 } 15668 if y != v_1_0_1.Args[0] { 15669 break 15670 } 15671 v_1_1 := v_1.Args[1] 15672 if v_1_1.Op != OpAMD64SBBLcarrymask { 15673 break 15674 } 15675 v_1_1_0 := v_1_1.Args[0] 15676 if v_1_1_0.Op != OpAMD64CMPQconst { 15677 break 15678 } 15679 if v_1_1_0.AuxInt != 32 { 15680 break 15681 } 15682 v_1_1_0_0 := v_1_1_0.Args[0] 15683 if v_1_1_0_0.Op != OpAMD64NEGQ { 15684 break 15685 } 15686 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 15687 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 15688 break 15689 } 15690 if v_1_1_0_0_0.AuxInt != -32 { 15691 break 15692 } 15693 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 15694 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 15695 break 15696 } 15697 if v_1_1_0_0_0_0.AuxInt != 31 { 15698 break 15699 } 15700 if y != v_1_1_0_0_0_0.Args[0] { 15701 break 15702 } 15703 v.reset(OpAMD64RORL) 15704 v.AddArg(x) 15705 v.AddArg(y) 15706 return true 15707 } 15708 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y)))) 15709 // cond: 15710 // result: (RORL x y) 15711 for { 15712 _ = v.Args[1] 15713 v_0 := v.Args[0] 15714 if v_0.Op != OpAMD64SHRL { 15715 break 15716 } 15717 _ = v_0.Args[1] 15718 x := v_0.Args[0] 15719 y := v_0.Args[1] 15720 v_1 := v.Args[1] 15721 if v_1.Op != OpAMD64ANDL { 15722 break 15723 } 15724 _ = v_1.Args[1] 15725 v_1_0 := v_1.Args[0] 15726 if v_1_0.Op != OpAMD64SBBLcarrymask { 15727 break 15728 } 15729 v_1_0_0 := v_1_0.Args[0] 15730 if v_1_0_0.Op != OpAMD64CMPQconst { 15731 break 15732 } 15733 if v_1_0_0.AuxInt != 32 { 15734 break 
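// Editorial note (a sketch, not rulegen output): the constants checked in
// this pattern (32, -32, and 31) are the fingerprint of the generic lowering
// of a variable 32-bit rotate-right, roughly x>>(y&31) | x<<((32-y)&31).
// SHLL by (NEGQ y) is equivalent because the hardware masks 32-bit shift
// counts to 5 bits, so (-y)&31 == (32-y)&31, and the SBBLcarrymask term
// zeroes the left-shift contribution when y&31 == 0. Once every structural
// check passes, the whole tree is replaced by one RORL.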
15735 } 15736 v_1_0_0_0 := v_1_0_0.Args[0] 15737 if v_1_0_0_0.Op != OpAMD64NEGQ { 15738 break 15739 } 15740 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 15741 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 15742 break 15743 } 15744 if v_1_0_0_0_0.AuxInt != -32 { 15745 break 15746 } 15747 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 15748 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 15749 break 15750 } 15751 if v_1_0_0_0_0_0.AuxInt != 31 { 15752 break 15753 } 15754 if y != v_1_0_0_0_0_0.Args[0] { 15755 break 15756 } 15757 v_1_1 := v_1.Args[1] 15758 if v_1_1.Op != OpAMD64SHLL { 15759 break 15760 } 15761 _ = v_1_1.Args[1] 15762 if x != v_1_1.Args[0] { 15763 break 15764 } 15765 v_1_1_1 := v_1_1.Args[1] 15766 if v_1_1_1.Op != OpAMD64NEGQ { 15767 break 15768 } 15769 if y != v_1_1_1.Args[0] { 15770 break 15771 } 15772 v.reset(OpAMD64RORL) 15773 v.AddArg(x) 15774 v.AddArg(y) 15775 return true 15776 } 15777 // match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y)) 15778 // cond: 15779 // result: (RORL x y) 15780 for { 15781 _ = v.Args[1] 15782 v_0 := v.Args[0] 15783 if v_0.Op != OpAMD64ANDL { 15784 break 15785 } 15786 _ = v_0.Args[1] 15787 v_0_0 := v_0.Args[0] 15788 if v_0_0.Op != OpAMD64SHLL { 15789 break 15790 } 15791 _ = v_0_0.Args[1] 15792 x := v_0_0.Args[0] 15793 v_0_0_1 := v_0_0.Args[1] 15794 if v_0_0_1.Op != OpAMD64NEGQ { 15795 break 15796 } 15797 y := v_0_0_1.Args[0] 15798 v_0_1 := v_0.Args[1] 15799 if v_0_1.Op != OpAMD64SBBLcarrymask { 15800 break 15801 } 15802 v_0_1_0 := v_0_1.Args[0] 15803 if v_0_1_0.Op != OpAMD64CMPQconst { 15804 break 15805 } 15806 if v_0_1_0.AuxInt != 32 { 15807 break 15808 } 15809 v_0_1_0_0 := v_0_1_0.Args[0] 15810 if v_0_1_0_0.Op != OpAMD64NEGQ { 15811 break 15812 } 15813 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 15814 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 15815 break 15816 } 15817 if v_0_1_0_0_0.AuxInt != -32 { 15818 break 15819 } 15820 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 15821 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 15822 break 15823 } 15824 if v_0_1_0_0_0_0.AuxInt != 31 { 15825 break 15826 } 15827 if y != v_0_1_0_0_0_0.Args[0] { 15828 break 15829 } 15830 v_1 := v.Args[1] 15831 if v_1.Op != OpAMD64SHRL { 15832 break 15833 } 15834 _ = v_1.Args[1] 15835 if x != v_1.Args[0] { 15836 break 15837 } 15838 if y != v_1.Args[1] { 15839 break 15840 } 15841 v.reset(OpAMD64RORL) 15842 v.AddArg(x) 15843 v.AddArg(y) 15844 return true 15845 } 15846 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y)) 15847 // cond: 15848 // result: (RORL x y) 15849 for { 15850 _ = v.Args[1] 15851 v_0 := v.Args[0] 15852 if v_0.Op != OpAMD64ANDL { 15853 break 15854 } 15855 _ = v_0.Args[1] 15856 v_0_0 := v_0.Args[0] 15857 if v_0_0.Op != OpAMD64SBBLcarrymask { 15858 break 15859 } 15860 v_0_0_0 := v_0_0.Args[0] 15861 if v_0_0_0.Op != OpAMD64CMPQconst { 15862 break 15863 } 15864 if v_0_0_0.AuxInt != 32 { 15865 break 15866 } 15867 v_0_0_0_0 := v_0_0_0.Args[0] 15868 if v_0_0_0_0.Op != OpAMD64NEGQ { 15869 break 15870 } 15871 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 15872 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 15873 break 15874 } 15875 if v_0_0_0_0_0.AuxInt != -32 { 15876 break 15877 } 15878 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 15879 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 15880 break 15881 } 15882 if v_0_0_0_0_0_0.AuxInt != 31 { 15883 break 15884 } 15885 y := v_0_0_0_0_0_0.Args[0] 15886 v_0_1 := v_0.Args[1] 15887 if v_0_1.Op != OpAMD64SHLL { 15888 break 15889 } 15890 _ = v_0_1.Args[1] 15891 x := 
v_0_1.Args[0] 15892 v_0_1_1 := v_0_1.Args[1] 15893 if v_0_1_1.Op != OpAMD64NEGQ { 15894 break 15895 } 15896 if y != v_0_1_1.Args[0] { 15897 break 15898 } 15899 v_1 := v.Args[1] 15900 if v_1.Op != OpAMD64SHRL { 15901 break 15902 } 15903 _ = v_1.Args[1] 15904 if x != v_1.Args[0] { 15905 break 15906 } 15907 if y != v_1.Args[1] { 15908 break 15909 } 15910 v.reset(OpAMD64RORL) 15911 v.AddArg(x) 15912 v.AddArg(y) 15913 return true 15914 } 15915 return false 15916 } 15917 func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { 15918 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 15919 // cond: 15920 // result: (RORL x y) 15921 for { 15922 _ = v.Args[1] 15923 v_0 := v.Args[0] 15924 if v_0.Op != OpAMD64SHRL { 15925 break 15926 } 15927 _ = v_0.Args[1] 15928 x := v_0.Args[0] 15929 y := v_0.Args[1] 15930 v_1 := v.Args[1] 15931 if v_1.Op != OpAMD64ANDL { 15932 break 15933 } 15934 _ = v_1.Args[1] 15935 v_1_0 := v_1.Args[0] 15936 if v_1_0.Op != OpAMD64SHLL { 15937 break 15938 } 15939 _ = v_1_0.Args[1] 15940 if x != v_1_0.Args[0] { 15941 break 15942 } 15943 v_1_0_1 := v_1_0.Args[1] 15944 if v_1_0_1.Op != OpAMD64NEGL { 15945 break 15946 } 15947 if y != v_1_0_1.Args[0] { 15948 break 15949 } 15950 v_1_1 := v_1.Args[1] 15951 if v_1_1.Op != OpAMD64SBBLcarrymask { 15952 break 15953 } 15954 v_1_1_0 := v_1_1.Args[0] 15955 if v_1_1_0.Op != OpAMD64CMPLconst { 15956 break 15957 } 15958 if v_1_1_0.AuxInt != 32 { 15959 break 15960 } 15961 v_1_1_0_0 := v_1_1_0.Args[0] 15962 if v_1_1_0_0.Op != OpAMD64NEGL { 15963 break 15964 } 15965 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 15966 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 15967 break 15968 } 15969 if v_1_1_0_0_0.AuxInt != -32 { 15970 break 15971 } 15972 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 15973 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 15974 break 15975 } 15976 if v_1_1_0_0_0_0.AuxInt != 31 { 15977 break 15978 } 15979 if y != v_1_1_0_0_0_0.Args[0] { 15980 break 15981 } 15982 v.reset(OpAMD64RORL) 15983 v.AddArg(x) 15984 v.AddArg(y) 15985 return true 15986 } 15987 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y)))) 15988 // cond: 15989 // result: (RORL x y) 15990 for { 15991 _ = v.Args[1] 15992 v_0 := v.Args[0] 15993 if v_0.Op != OpAMD64SHRL { 15994 break 15995 } 15996 _ = v_0.Args[1] 15997 x := v_0.Args[0] 15998 y := v_0.Args[1] 15999 v_1 := v.Args[1] 16000 if v_1.Op != OpAMD64ANDL { 16001 break 16002 } 16003 _ = v_1.Args[1] 16004 v_1_0 := v_1.Args[0] 16005 if v_1_0.Op != OpAMD64SBBLcarrymask { 16006 break 16007 } 16008 v_1_0_0 := v_1_0.Args[0] 16009 if v_1_0_0.Op != OpAMD64CMPLconst { 16010 break 16011 } 16012 if v_1_0_0.AuxInt != 32 { 16013 break 16014 } 16015 v_1_0_0_0 := v_1_0_0.Args[0] 16016 if v_1_0_0_0.Op != OpAMD64NEGL { 16017 break 16018 } 16019 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 16020 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 16021 break 16022 } 16023 if v_1_0_0_0_0.AuxInt != -32 { 16024 break 16025 } 16026 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 16027 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 16028 break 16029 } 16030 if v_1_0_0_0_0_0.AuxInt != 31 { 16031 break 16032 } 16033 if y != v_1_0_0_0_0_0.Args[0] { 16034 break 16035 } 16036 v_1_1 := v_1.Args[1] 16037 if v_1_1.Op != OpAMD64SHLL { 16038 break 16039 } 16040 _ = v_1_1.Args[1] 16041 if x != v_1_1.Args[0] { 16042 break 16043 } 16044 v_1_1_1 := v_1_1.Args[1] 16045 if v_1_1_1.Op != OpAMD64NEGL { 16046 break 16047 } 16048 if y != v_1_1_1.Args[0] { 16049 break 16050 } 16051 
v.reset(OpAMD64RORL) 16052 v.AddArg(x) 16053 v.AddArg(y) 16054 return true 16055 } 16056 // match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y)) 16057 // cond: 16058 // result: (RORL x y) 16059 for { 16060 _ = v.Args[1] 16061 v_0 := v.Args[0] 16062 if v_0.Op != OpAMD64ANDL { 16063 break 16064 } 16065 _ = v_0.Args[1] 16066 v_0_0 := v_0.Args[0] 16067 if v_0_0.Op != OpAMD64SHLL { 16068 break 16069 } 16070 _ = v_0_0.Args[1] 16071 x := v_0_0.Args[0] 16072 v_0_0_1 := v_0_0.Args[1] 16073 if v_0_0_1.Op != OpAMD64NEGL { 16074 break 16075 } 16076 y := v_0_0_1.Args[0] 16077 v_0_1 := v_0.Args[1] 16078 if v_0_1.Op != OpAMD64SBBLcarrymask { 16079 break 16080 } 16081 v_0_1_0 := v_0_1.Args[0] 16082 if v_0_1_0.Op != OpAMD64CMPLconst { 16083 break 16084 } 16085 if v_0_1_0.AuxInt != 32 { 16086 break 16087 } 16088 v_0_1_0_0 := v_0_1_0.Args[0] 16089 if v_0_1_0_0.Op != OpAMD64NEGL { 16090 break 16091 } 16092 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 16093 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 16094 break 16095 } 16096 if v_0_1_0_0_0.AuxInt != -32 { 16097 break 16098 } 16099 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 16100 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 16101 break 16102 } 16103 if v_0_1_0_0_0_0.AuxInt != 31 { 16104 break 16105 } 16106 if y != v_0_1_0_0_0_0.Args[0] { 16107 break 16108 } 16109 v_1 := v.Args[1] 16110 if v_1.Op != OpAMD64SHRL { 16111 break 16112 } 16113 _ = v_1.Args[1] 16114 if x != v_1.Args[0] { 16115 break 16116 } 16117 if y != v_1.Args[1] { 16118 break 16119 } 16120 v.reset(OpAMD64RORL) 16121 v.AddArg(x) 16122 v.AddArg(y) 16123 return true 16124 } 16125 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y)) 16126 // cond: 16127 // result: (RORL x y) 16128 for { 16129 _ = v.Args[1] 16130 v_0 := v.Args[0] 16131 if v_0.Op != OpAMD64ANDL { 16132 break 16133 } 16134 _ = v_0.Args[1] 16135 v_0_0 := v_0.Args[0] 16136 if v_0_0.Op != OpAMD64SBBLcarrymask { 16137 break 16138 } 16139 v_0_0_0 := v_0_0.Args[0] 16140 if v_0_0_0.Op != OpAMD64CMPLconst { 16141 break 16142 } 16143 if v_0_0_0.AuxInt != 32 { 16144 break 16145 } 16146 v_0_0_0_0 := v_0_0_0.Args[0] 16147 if v_0_0_0_0.Op != OpAMD64NEGL { 16148 break 16149 } 16150 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 16151 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 16152 break 16153 } 16154 if v_0_0_0_0_0.AuxInt != -32 { 16155 break 16156 } 16157 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 16158 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 16159 break 16160 } 16161 if v_0_0_0_0_0_0.AuxInt != 31 { 16162 break 16163 } 16164 y := v_0_0_0_0_0_0.Args[0] 16165 v_0_1 := v_0.Args[1] 16166 if v_0_1.Op != OpAMD64SHLL { 16167 break 16168 } 16169 _ = v_0_1.Args[1] 16170 x := v_0_1.Args[0] 16171 v_0_1_1 := v_0_1.Args[1] 16172 if v_0_1_1.Op != OpAMD64NEGL { 16173 break 16174 } 16175 if y != v_0_1_1.Args[0] { 16176 break 16177 } 16178 v_1 := v.Args[1] 16179 if v_1.Op != OpAMD64SHRL { 16180 break 16181 } 16182 _ = v_1.Args[1] 16183 if x != v_1.Args[0] { 16184 break 16185 } 16186 if y != v_1.Args[1] { 16187 break 16188 } 16189 v.reset(OpAMD64RORL) 16190 v.AddArg(x) 16191 v.AddArg(y) 16192 return true 16193 } 16194 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) 16195 // cond: v.Type.Size() == 2 16196 // result: (ROLW x y) 16197 for { 16198 _ = v.Args[1] 16199 v_0 := v.Args[0] 16200 if v_0.Op != OpAMD64SHLL { 16201 break 16202 } 
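// Editorial note (a sketch, not rulegen output): the 16-bit rules that
// follow cannot lean on hardware count masking the way the 32-bit ones do,
// so the generic lowering keeps explicit ANDQconst [15] masks and the
// -16/16 constants visible in the tree; the rewrite still collapses the
// whole thing to a single ROLW. The extra cond v.Type.Size() == 2 is needed
// because ORL is also used for 8- and 16-bit values, and the rotate is only
// correct at the width the masks were built for.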
16203 _ = v_0.Args[1] 16204 x := v_0.Args[0] 16205 v_0_1 := v_0.Args[1] 16206 if v_0_1.Op != OpAMD64ANDQconst { 16207 break 16208 } 16209 if v_0_1.AuxInt != 15 { 16210 break 16211 } 16212 y := v_0_1.Args[0] 16213 v_1 := v.Args[1] 16214 if v_1.Op != OpAMD64ANDL { 16215 break 16216 } 16217 _ = v_1.Args[1] 16218 v_1_0 := v_1.Args[0] 16219 if v_1_0.Op != OpAMD64SHRW { 16220 break 16221 } 16222 _ = v_1_0.Args[1] 16223 if x != v_1_0.Args[0] { 16224 break 16225 } 16226 v_1_0_1 := v_1_0.Args[1] 16227 if v_1_0_1.Op != OpAMD64NEGQ { 16228 break 16229 } 16230 v_1_0_1_0 := v_1_0_1.Args[0] 16231 if v_1_0_1_0.Op != OpAMD64ADDQconst { 16232 break 16233 } 16234 if v_1_0_1_0.AuxInt != -16 { 16235 break 16236 } 16237 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 16238 if v_1_0_1_0_0.Op != OpAMD64ANDQconst { 16239 break 16240 } 16241 if v_1_0_1_0_0.AuxInt != 15 { 16242 break 16243 } 16244 if y != v_1_0_1_0_0.Args[0] { 16245 break 16246 } 16247 v_1_1 := v_1.Args[1] 16248 if v_1_1.Op != OpAMD64SBBLcarrymask { 16249 break 16250 } 16251 v_1_1_0 := v_1_1.Args[0] 16252 if v_1_1_0.Op != OpAMD64CMPQconst { 16253 break 16254 } 16255 if v_1_1_0.AuxInt != 16 { 16256 break 16257 } 16258 v_1_1_0_0 := v_1_1_0.Args[0] 16259 if v_1_1_0_0.Op != OpAMD64NEGQ { 16260 break 16261 } 16262 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 16263 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 16264 break 16265 } 16266 if v_1_1_0_0_0.AuxInt != -16 { 16267 break 16268 } 16269 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 16270 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 16271 break 16272 } 16273 if v_1_1_0_0_0_0.AuxInt != 15 { 16274 break 16275 } 16276 if y != v_1_1_0_0_0_0.Args[0] { 16277 break 16278 } 16279 if !(v.Type.Size() == 2) { 16280 break 16281 } 16282 v.reset(OpAMD64ROLW) 16283 v.AddArg(x) 16284 v.AddArg(y) 16285 return true 16286 } 16287 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))) 16288 // cond: v.Type.Size() == 2 16289 // result: (ROLW x y) 16290 for { 16291 _ = v.Args[1] 16292 v_0 := v.Args[0] 16293 if v_0.Op != OpAMD64SHLL { 16294 break 16295 } 16296 _ = v_0.Args[1] 16297 x := v_0.Args[0] 16298 v_0_1 := v_0.Args[1] 16299 if v_0_1.Op != OpAMD64ANDQconst { 16300 break 16301 } 16302 if v_0_1.AuxInt != 15 { 16303 break 16304 } 16305 y := v_0_1.Args[0] 16306 v_1 := v.Args[1] 16307 if v_1.Op != OpAMD64ANDL { 16308 break 16309 } 16310 _ = v_1.Args[1] 16311 v_1_0 := v_1.Args[0] 16312 if v_1_0.Op != OpAMD64SBBLcarrymask { 16313 break 16314 } 16315 v_1_0_0 := v_1_0.Args[0] 16316 if v_1_0_0.Op != OpAMD64CMPQconst { 16317 break 16318 } 16319 if v_1_0_0.AuxInt != 16 { 16320 break 16321 } 16322 v_1_0_0_0 := v_1_0_0.Args[0] 16323 if v_1_0_0_0.Op != OpAMD64NEGQ { 16324 break 16325 } 16326 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 16327 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 16328 break 16329 } 16330 if v_1_0_0_0_0.AuxInt != -16 { 16331 break 16332 } 16333 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 16334 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 16335 break 16336 } 16337 if v_1_0_0_0_0_0.AuxInt != 15 { 16338 break 16339 } 16340 if y != v_1_0_0_0_0_0.Args[0] { 16341 break 16342 } 16343 v_1_1 := v_1.Args[1] 16344 if v_1_1.Op != OpAMD64SHRW { 16345 break 16346 } 16347 _ = v_1_1.Args[1] 16348 if x != v_1_1.Args[0] { 16349 break 16350 } 16351 v_1_1_1 := v_1_1.Args[1] 16352 if v_1_1_1.Op != OpAMD64NEGQ { 16353 break 16354 } 16355 v_1_1_1_0 := v_1_1_1.Args[0] 16356 if v_1_1_1_0.Op != OpAMD64ADDQconst { 16357 break 16358 } 16359 if v_1_1_1_0.AuxInt != -16 { 16360 break 
16361 } 16362 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 16363 if v_1_1_1_0_0.Op != OpAMD64ANDQconst { 16364 break 16365 } 16366 if v_1_1_1_0_0.AuxInt != 15 { 16367 break 16368 } 16369 if y != v_1_1_1_0_0.Args[0] { 16370 break 16371 } 16372 if !(v.Type.Size() == 2) { 16373 break 16374 } 16375 v.reset(OpAMD64ROLW) 16376 v.AddArg(x) 16377 v.AddArg(y) 16378 return true 16379 } 16380 // match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15]))) 16381 // cond: v.Type.Size() == 2 16382 // result: (ROLW x y) 16383 for { 16384 _ = v.Args[1] 16385 v_0 := v.Args[0] 16386 if v_0.Op != OpAMD64ANDL { 16387 break 16388 } 16389 _ = v_0.Args[1] 16390 v_0_0 := v_0.Args[0] 16391 if v_0_0.Op != OpAMD64SHRW { 16392 break 16393 } 16394 _ = v_0_0.Args[1] 16395 x := v_0_0.Args[0] 16396 v_0_0_1 := v_0_0.Args[1] 16397 if v_0_0_1.Op != OpAMD64NEGQ { 16398 break 16399 } 16400 v_0_0_1_0 := v_0_0_1.Args[0] 16401 if v_0_0_1_0.Op != OpAMD64ADDQconst { 16402 break 16403 } 16404 if v_0_0_1_0.AuxInt != -16 { 16405 break 16406 } 16407 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 16408 if v_0_0_1_0_0.Op != OpAMD64ANDQconst { 16409 break 16410 } 16411 if v_0_0_1_0_0.AuxInt != 15 { 16412 break 16413 } 16414 y := v_0_0_1_0_0.Args[0] 16415 v_0_1 := v_0.Args[1] 16416 if v_0_1.Op != OpAMD64SBBLcarrymask { 16417 break 16418 } 16419 v_0_1_0 := v_0_1.Args[0] 16420 if v_0_1_0.Op != OpAMD64CMPQconst { 16421 break 16422 } 16423 if v_0_1_0.AuxInt != 16 { 16424 break 16425 } 16426 v_0_1_0_0 := v_0_1_0.Args[0] 16427 if v_0_1_0_0.Op != OpAMD64NEGQ { 16428 break 16429 } 16430 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 16431 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 16432 break 16433 } 16434 if v_0_1_0_0_0.AuxInt != -16 { 16435 break 16436 } 16437 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 16438 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 16439 break 16440 } 16441 if v_0_1_0_0_0_0.AuxInt != 15 { 16442 break 16443 } 16444 if y != v_0_1_0_0_0_0.Args[0] { 16445 break 16446 } 16447 v_1 := v.Args[1] 16448 if v_1.Op != OpAMD64SHLL { 16449 break 16450 } 16451 _ = v_1.Args[1] 16452 if x != v_1.Args[0] { 16453 break 16454 } 16455 v_1_1 := v_1.Args[1] 16456 if v_1_1.Op != OpAMD64ANDQconst { 16457 break 16458 } 16459 if v_1_1.AuxInt != 15 { 16460 break 16461 } 16462 if y != v_1_1.Args[0] { 16463 break 16464 } 16465 if !(v.Type.Size() == 2) { 16466 break 16467 } 16468 v.reset(OpAMD64ROLW) 16469 v.AddArg(x) 16470 v.AddArg(y) 16471 return true 16472 } 16473 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15]))) 16474 // cond: v.Type.Size() == 2 16475 // result: (ROLW x y) 16476 for { 16477 _ = v.Args[1] 16478 v_0 := v.Args[0] 16479 if v_0.Op != OpAMD64ANDL { 16480 break 16481 } 16482 _ = v_0.Args[1] 16483 v_0_0 := v_0.Args[0] 16484 if v_0_0.Op != OpAMD64SBBLcarrymask { 16485 break 16486 } 16487 v_0_0_0 := v_0_0.Args[0] 16488 if v_0_0_0.Op != OpAMD64CMPQconst { 16489 break 16490 } 16491 if v_0_0_0.AuxInt != 16 { 16492 break 16493 } 16494 v_0_0_0_0 := v_0_0_0.Args[0] 16495 if v_0_0_0_0.Op != OpAMD64NEGQ { 16496 break 16497 } 16498 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 16499 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 16500 break 16501 } 16502 if v_0_0_0_0_0.AuxInt != -16 { 16503 break 16504 } 16505 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 16506 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 16507 break 16508 } 16509 if v_0_0_0_0_0_0.AuxInt != 15 { 16510 break 16511 } 16512 y 
:= v_0_0_0_0_0_0.Args[0] 16513 v_0_1 := v_0.Args[1] 16514 if v_0_1.Op != OpAMD64SHRW { 16515 break 16516 } 16517 _ = v_0_1.Args[1] 16518 x := v_0_1.Args[0] 16519 v_0_1_1 := v_0_1.Args[1] 16520 if v_0_1_1.Op != OpAMD64NEGQ { 16521 break 16522 } 16523 v_0_1_1_0 := v_0_1_1.Args[0] 16524 if v_0_1_1_0.Op != OpAMD64ADDQconst { 16525 break 16526 } 16527 if v_0_1_1_0.AuxInt != -16 { 16528 break 16529 } 16530 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 16531 if v_0_1_1_0_0.Op != OpAMD64ANDQconst { 16532 break 16533 } 16534 if v_0_1_1_0_0.AuxInt != 15 { 16535 break 16536 } 16537 if y != v_0_1_1_0_0.Args[0] { 16538 break 16539 } 16540 v_1 := v.Args[1] 16541 if v_1.Op != OpAMD64SHLL { 16542 break 16543 } 16544 _ = v_1.Args[1] 16545 if x != v_1.Args[0] { 16546 break 16547 } 16548 v_1_1 := v_1.Args[1] 16549 if v_1_1.Op != OpAMD64ANDQconst { 16550 break 16551 } 16552 if v_1_1.AuxInt != 15 { 16553 break 16554 } 16555 if y != v_1_1.Args[0] { 16556 break 16557 } 16558 if !(v.Type.Size() == 2) { 16559 break 16560 } 16561 v.reset(OpAMD64ROLW) 16562 v.AddArg(x) 16563 v.AddArg(y) 16564 return true 16565 } 16566 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])))) 16567 // cond: v.Type.Size() == 2 16568 // result: (ROLW x y) 16569 for { 16570 _ = v.Args[1] 16571 v_0 := v.Args[0] 16572 if v_0.Op != OpAMD64SHLL { 16573 break 16574 } 16575 _ = v_0.Args[1] 16576 x := v_0.Args[0] 16577 v_0_1 := v_0.Args[1] 16578 if v_0_1.Op != OpAMD64ANDLconst { 16579 break 16580 } 16581 if v_0_1.AuxInt != 15 { 16582 break 16583 } 16584 y := v_0_1.Args[0] 16585 v_1 := v.Args[1] 16586 if v_1.Op != OpAMD64ANDL { 16587 break 16588 } 16589 _ = v_1.Args[1] 16590 v_1_0 := v_1.Args[0] 16591 if v_1_0.Op != OpAMD64SHRW { 16592 break 16593 } 16594 _ = v_1_0.Args[1] 16595 if x != v_1_0.Args[0] { 16596 break 16597 } 16598 v_1_0_1 := v_1_0.Args[1] 16599 if v_1_0_1.Op != OpAMD64NEGL { 16600 break 16601 } 16602 v_1_0_1_0 := v_1_0_1.Args[0] 16603 if v_1_0_1_0.Op != OpAMD64ADDLconst { 16604 break 16605 } 16606 if v_1_0_1_0.AuxInt != -16 { 16607 break 16608 } 16609 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 16610 if v_1_0_1_0_0.Op != OpAMD64ANDLconst { 16611 break 16612 } 16613 if v_1_0_1_0_0.AuxInt != 15 { 16614 break 16615 } 16616 if y != v_1_0_1_0_0.Args[0] { 16617 break 16618 } 16619 v_1_1 := v_1.Args[1] 16620 if v_1_1.Op != OpAMD64SBBLcarrymask { 16621 break 16622 } 16623 v_1_1_0 := v_1_1.Args[0] 16624 if v_1_1_0.Op != OpAMD64CMPLconst { 16625 break 16626 } 16627 if v_1_1_0.AuxInt != 16 { 16628 break 16629 } 16630 v_1_1_0_0 := v_1_1_0.Args[0] 16631 if v_1_1_0_0.Op != OpAMD64NEGL { 16632 break 16633 } 16634 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 16635 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 16636 break 16637 } 16638 if v_1_1_0_0_0.AuxInt != -16 { 16639 break 16640 } 16641 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 16642 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 16643 break 16644 } 16645 if v_1_1_0_0_0_0.AuxInt != 15 { 16646 break 16647 } 16648 if y != v_1_1_0_0_0_0.Args[0] { 16649 break 16650 } 16651 if !(v.Type.Size() == 2) { 16652 break 16653 } 16654 v.reset(OpAMD64ROLW) 16655 v.AddArg(x) 16656 v.AddArg(y) 16657 return true 16658 } 16659 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))) 16660 // cond: v.Type.Size() == 2 16661 // result: (ROLW x y) 16662 for { 16663 _ = v.Args[1] 16664 v_0 := v.Args[0] 16665 if v_0.Op != 
OpAMD64SHLL { 16666 break 16667 } 16668 _ = v_0.Args[1] 16669 x := v_0.Args[0] 16670 v_0_1 := v_0.Args[1] 16671 if v_0_1.Op != OpAMD64ANDLconst { 16672 break 16673 } 16674 if v_0_1.AuxInt != 15 { 16675 break 16676 } 16677 y := v_0_1.Args[0] 16678 v_1 := v.Args[1] 16679 if v_1.Op != OpAMD64ANDL { 16680 break 16681 } 16682 _ = v_1.Args[1] 16683 v_1_0 := v_1.Args[0] 16684 if v_1_0.Op != OpAMD64SBBLcarrymask { 16685 break 16686 } 16687 v_1_0_0 := v_1_0.Args[0] 16688 if v_1_0_0.Op != OpAMD64CMPLconst { 16689 break 16690 } 16691 if v_1_0_0.AuxInt != 16 { 16692 break 16693 } 16694 v_1_0_0_0 := v_1_0_0.Args[0] 16695 if v_1_0_0_0.Op != OpAMD64NEGL { 16696 break 16697 } 16698 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 16699 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 16700 break 16701 } 16702 if v_1_0_0_0_0.AuxInt != -16 { 16703 break 16704 } 16705 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 16706 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 16707 break 16708 } 16709 if v_1_0_0_0_0_0.AuxInt != 15 { 16710 break 16711 } 16712 if y != v_1_0_0_0_0_0.Args[0] { 16713 break 16714 } 16715 v_1_1 := v_1.Args[1] 16716 if v_1_1.Op != OpAMD64SHRW { 16717 break 16718 } 16719 _ = v_1_1.Args[1] 16720 if x != v_1_1.Args[0] { 16721 break 16722 } 16723 v_1_1_1 := v_1_1.Args[1] 16724 if v_1_1_1.Op != OpAMD64NEGL { 16725 break 16726 } 16727 v_1_1_1_0 := v_1_1_1.Args[0] 16728 if v_1_1_1_0.Op != OpAMD64ADDLconst { 16729 break 16730 } 16731 if v_1_1_1_0.AuxInt != -16 { 16732 break 16733 } 16734 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 16735 if v_1_1_1_0_0.Op != OpAMD64ANDLconst { 16736 break 16737 } 16738 if v_1_1_1_0_0.AuxInt != 15 { 16739 break 16740 } 16741 if y != v_1_1_1_0_0.Args[0] { 16742 break 16743 } 16744 if !(v.Type.Size() == 2) { 16745 break 16746 } 16747 v.reset(OpAMD64ROLW) 16748 v.AddArg(x) 16749 v.AddArg(y) 16750 return true 16751 } 16752 return false 16753 } 16754 func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { 16755 // match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15]))) 16756 // cond: v.Type.Size() == 2 16757 // result: (ROLW x y) 16758 for { 16759 _ = v.Args[1] 16760 v_0 := v.Args[0] 16761 if v_0.Op != OpAMD64ANDL { 16762 break 16763 } 16764 _ = v_0.Args[1] 16765 v_0_0 := v_0.Args[0] 16766 if v_0_0.Op != OpAMD64SHRW { 16767 break 16768 } 16769 _ = v_0_0.Args[1] 16770 x := v_0_0.Args[0] 16771 v_0_0_1 := v_0_0.Args[1] 16772 if v_0_0_1.Op != OpAMD64NEGL { 16773 break 16774 } 16775 v_0_0_1_0 := v_0_0_1.Args[0] 16776 if v_0_0_1_0.Op != OpAMD64ADDLconst { 16777 break 16778 } 16779 if v_0_0_1_0.AuxInt != -16 { 16780 break 16781 } 16782 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 16783 if v_0_0_1_0_0.Op != OpAMD64ANDLconst { 16784 break 16785 } 16786 if v_0_0_1_0_0.AuxInt != 15 { 16787 break 16788 } 16789 y := v_0_0_1_0_0.Args[0] 16790 v_0_1 := v_0.Args[1] 16791 if v_0_1.Op != OpAMD64SBBLcarrymask { 16792 break 16793 } 16794 v_0_1_0 := v_0_1.Args[0] 16795 if v_0_1_0.Op != OpAMD64CMPLconst { 16796 break 16797 } 16798 if v_0_1_0.AuxInt != 16 { 16799 break 16800 } 16801 v_0_1_0_0 := v_0_1_0.Args[0] 16802 if v_0_1_0_0.Op != OpAMD64NEGL { 16803 break 16804 } 16805 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 16806 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 16807 break 16808 } 16809 if v_0_1_0_0_0.AuxInt != -16 { 16810 break 16811 } 16812 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 16813 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 16814 break 16815 } 16816 if v_0_1_0_0_0_0.AuxInt != 15 { 16817 break 16818 } 16819 if y != v_0_1_0_0_0_0.Args[0] { 16820 
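// The same y must feed the shift amounts and every mask in the tree; if a
// different value shows up here, this is not a single-variable rotate and
// the rule must not fire.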
break 16821 } 16822 v_1 := v.Args[1] 16823 if v_1.Op != OpAMD64SHLL { 16824 break 16825 } 16826 _ = v_1.Args[1] 16827 if x != v_1.Args[0] { 16828 break 16829 } 16830 v_1_1 := v_1.Args[1] 16831 if v_1_1.Op != OpAMD64ANDLconst { 16832 break 16833 } 16834 if v_1_1.AuxInt != 15 { 16835 break 16836 } 16837 if y != v_1_1.Args[0] { 16838 break 16839 } 16840 if !(v.Type.Size() == 2) { 16841 break 16842 } 16843 v.reset(OpAMD64ROLW) 16844 v.AddArg(x) 16845 v.AddArg(y) 16846 return true 16847 } 16848 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15]))) 16849 // cond: v.Type.Size() == 2 16850 // result: (ROLW x y) 16851 for { 16852 _ = v.Args[1] 16853 v_0 := v.Args[0] 16854 if v_0.Op != OpAMD64ANDL { 16855 break 16856 } 16857 _ = v_0.Args[1] 16858 v_0_0 := v_0.Args[0] 16859 if v_0_0.Op != OpAMD64SBBLcarrymask { 16860 break 16861 } 16862 v_0_0_0 := v_0_0.Args[0] 16863 if v_0_0_0.Op != OpAMD64CMPLconst { 16864 break 16865 } 16866 if v_0_0_0.AuxInt != 16 { 16867 break 16868 } 16869 v_0_0_0_0 := v_0_0_0.Args[0] 16870 if v_0_0_0_0.Op != OpAMD64NEGL { 16871 break 16872 } 16873 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 16874 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 16875 break 16876 } 16877 if v_0_0_0_0_0.AuxInt != -16 { 16878 break 16879 } 16880 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 16881 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 16882 break 16883 } 16884 if v_0_0_0_0_0_0.AuxInt != 15 { 16885 break 16886 } 16887 y := v_0_0_0_0_0_0.Args[0] 16888 v_0_1 := v_0.Args[1] 16889 if v_0_1.Op != OpAMD64SHRW { 16890 break 16891 } 16892 _ = v_0_1.Args[1] 16893 x := v_0_1.Args[0] 16894 v_0_1_1 := v_0_1.Args[1] 16895 if v_0_1_1.Op != OpAMD64NEGL { 16896 break 16897 } 16898 v_0_1_1_0 := v_0_1_1.Args[0] 16899 if v_0_1_1_0.Op != OpAMD64ADDLconst { 16900 break 16901 } 16902 if v_0_1_1_0.AuxInt != -16 { 16903 break 16904 } 16905 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 16906 if v_0_1_1_0_0.Op != OpAMD64ANDLconst { 16907 break 16908 } 16909 if v_0_1_1_0_0.AuxInt != 15 { 16910 break 16911 } 16912 if y != v_0_1_1_0_0.Args[0] { 16913 break 16914 } 16915 v_1 := v.Args[1] 16916 if v_1.Op != OpAMD64SHLL { 16917 break 16918 } 16919 _ = v_1.Args[1] 16920 if x != v_1.Args[0] { 16921 break 16922 } 16923 v_1_1 := v_1.Args[1] 16924 if v_1_1.Op != OpAMD64ANDLconst { 16925 break 16926 } 16927 if v_1_1.AuxInt != 15 { 16928 break 16929 } 16930 if y != v_1_1.Args[0] { 16931 break 16932 } 16933 if !(v.Type.Size() == 2) { 16934 break 16935 } 16936 v.reset(OpAMD64ROLW) 16937 v.AddArg(x) 16938 v.AddArg(y) 16939 return true 16940 } 16941 // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) 16942 // cond: v.Type.Size() == 2 16943 // result: (RORW x y) 16944 for { 16945 _ = v.Args[1] 16946 v_0 := v.Args[0] 16947 if v_0.Op != OpAMD64SHRW { 16948 break 16949 } 16950 _ = v_0.Args[1] 16951 x := v_0.Args[0] 16952 v_0_1 := v_0.Args[1] 16953 if v_0_1.Op != OpAMD64ANDQconst { 16954 break 16955 } 16956 if v_0_1.AuxInt != 15 { 16957 break 16958 } 16959 y := v_0_1.Args[0] 16960 v_1 := v.Args[1] 16961 if v_1.Op != OpAMD64SHLL { 16962 break 16963 } 16964 _ = v_1.Args[1] 16965 if x != v_1.Args[0] { 16966 break 16967 } 16968 v_1_1 := v_1.Args[1] 16969 if v_1_1.Op != OpAMD64NEGQ { 16970 break 16971 } 16972 v_1_1_0 := v_1_1.Args[0] 16973 if v_1_1_0.Op != OpAMD64ADDQconst { 16974 break 16975 } 16976 if v_1_1_0.AuxInt != -16 { 16977 break 16978 } 16979 v_1_1_0_0 := v_1_1_0.Args[0] 16980 if v_1_1_0_0.Op != 
OpAMD64ANDQconst { 16981 break 16982 } 16983 if v_1_1_0_0.AuxInt != 15 { 16984 break 16985 } 16986 if y != v_1_1_0_0.Args[0] { 16987 break 16988 } 16989 if !(v.Type.Size() == 2) { 16990 break 16991 } 16992 v.reset(OpAMD64RORW) 16993 v.AddArg(x) 16994 v.AddArg(y) 16995 return true 16996 } 16997 // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15]))) 16998 // cond: v.Type.Size() == 2 16999 // result: (RORW x y) 17000 for { 17001 _ = v.Args[1] 17002 v_0 := v.Args[0] 17003 if v_0.Op != OpAMD64SHLL { 17004 break 17005 } 17006 _ = v_0.Args[1] 17007 x := v_0.Args[0] 17008 v_0_1 := v_0.Args[1] 17009 if v_0_1.Op != OpAMD64NEGQ { 17010 break 17011 } 17012 v_0_1_0 := v_0_1.Args[0] 17013 if v_0_1_0.Op != OpAMD64ADDQconst { 17014 break 17015 } 17016 if v_0_1_0.AuxInt != -16 { 17017 break 17018 } 17019 v_0_1_0_0 := v_0_1_0.Args[0] 17020 if v_0_1_0_0.Op != OpAMD64ANDQconst { 17021 break 17022 } 17023 if v_0_1_0_0.AuxInt != 15 { 17024 break 17025 } 17026 y := v_0_1_0_0.Args[0] 17027 v_1 := v.Args[1] 17028 if v_1.Op != OpAMD64SHRW { 17029 break 17030 } 17031 _ = v_1.Args[1] 17032 if x != v_1.Args[0] { 17033 break 17034 } 17035 v_1_1 := v_1.Args[1] 17036 if v_1_1.Op != OpAMD64ANDQconst { 17037 break 17038 } 17039 if v_1_1.AuxInt != 15 { 17040 break 17041 } 17042 if y != v_1_1.Args[0] { 17043 break 17044 } 17045 if !(v.Type.Size() == 2) { 17046 break 17047 } 17048 v.reset(OpAMD64RORW) 17049 v.AddArg(x) 17050 v.AddArg(y) 17051 return true 17052 } 17053 // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) 17054 // cond: v.Type.Size() == 2 17055 // result: (RORW x y) 17056 for { 17057 _ = v.Args[1] 17058 v_0 := v.Args[0] 17059 if v_0.Op != OpAMD64SHRW { 17060 break 17061 } 17062 _ = v_0.Args[1] 17063 x := v_0.Args[0] 17064 v_0_1 := v_0.Args[1] 17065 if v_0_1.Op != OpAMD64ANDLconst { 17066 break 17067 } 17068 if v_0_1.AuxInt != 15 { 17069 break 17070 } 17071 y := v_0_1.Args[0] 17072 v_1 := v.Args[1] 17073 if v_1.Op != OpAMD64SHLL { 17074 break 17075 } 17076 _ = v_1.Args[1] 17077 if x != v_1.Args[0] { 17078 break 17079 } 17080 v_1_1 := v_1.Args[1] 17081 if v_1_1.Op != OpAMD64NEGL { 17082 break 17083 } 17084 v_1_1_0 := v_1_1.Args[0] 17085 if v_1_1_0.Op != OpAMD64ADDLconst { 17086 break 17087 } 17088 if v_1_1_0.AuxInt != -16 { 17089 break 17090 } 17091 v_1_1_0_0 := v_1_1_0.Args[0] 17092 if v_1_1_0_0.Op != OpAMD64ANDLconst { 17093 break 17094 } 17095 if v_1_1_0_0.AuxInt != 15 { 17096 break 17097 } 17098 if y != v_1_1_0_0.Args[0] { 17099 break 17100 } 17101 if !(v.Type.Size() == 2) { 17102 break 17103 } 17104 v.reset(OpAMD64RORW) 17105 v.AddArg(x) 17106 v.AddArg(y) 17107 return true 17108 } 17109 // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15]))) 17110 // cond: v.Type.Size() == 2 17111 // result: (RORW x y) 17112 for { 17113 _ = v.Args[1] 17114 v_0 := v.Args[0] 17115 if v_0.Op != OpAMD64SHLL { 17116 break 17117 } 17118 _ = v_0.Args[1] 17119 x := v_0.Args[0] 17120 v_0_1 := v_0.Args[1] 17121 if v_0_1.Op != OpAMD64NEGL { 17122 break 17123 } 17124 v_0_1_0 := v_0_1.Args[0] 17125 if v_0_1_0.Op != OpAMD64ADDLconst { 17126 break 17127 } 17128 if v_0_1_0.AuxInt != -16 { 17129 break 17130 } 17131 v_0_1_0_0 := v_0_1_0.Args[0] 17132 if v_0_1_0_0.Op != OpAMD64ANDLconst { 17133 break 17134 } 17135 if v_0_1_0_0.AuxInt != 15 { 17136 break 17137 } 17138 y := v_0_1_0_0.Args[0] 17139 v_1 := v.Args[1] 17140 if v_1.Op != OpAMD64SHRW { 17141 break 17142 } 17143 _ = v_1.Args[1] 17144 if x != v_1.Args[0] { 17145 
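// Both shifts must act on the same x; a mismatch means the ORL combines two
// unrelated values rather than rotating one.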
break 17146 } 17147 v_1_1 := v_1.Args[1] 17148 if v_1_1.Op != OpAMD64ANDLconst { 17149 break 17150 } 17151 if v_1_1.AuxInt != 15 { 17152 break 17153 } 17154 if y != v_1_1.Args[0] { 17155 break 17156 } 17157 if !(v.Type.Size() == 2) { 17158 break 17159 } 17160 v.reset(OpAMD64RORW) 17161 v.AddArg(x) 17162 v.AddArg(y) 17163 return true 17164 } 17165 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])))) 17166 // cond: v.Type.Size() == 1 17167 // result: (ROLB x y) 17168 for { 17169 _ = v.Args[1] 17170 v_0 := v.Args[0] 17171 if v_0.Op != OpAMD64SHLL { 17172 break 17173 } 17174 _ = v_0.Args[1] 17175 x := v_0.Args[0] 17176 v_0_1 := v_0.Args[1] 17177 if v_0_1.Op != OpAMD64ANDQconst { 17178 break 17179 } 17180 if v_0_1.AuxInt != 7 { 17181 break 17182 } 17183 y := v_0_1.Args[0] 17184 v_1 := v.Args[1] 17185 if v_1.Op != OpAMD64ANDL { 17186 break 17187 } 17188 _ = v_1.Args[1] 17189 v_1_0 := v_1.Args[0] 17190 if v_1_0.Op != OpAMD64SHRB { 17191 break 17192 } 17193 _ = v_1_0.Args[1] 17194 if x != v_1_0.Args[0] { 17195 break 17196 } 17197 v_1_0_1 := v_1_0.Args[1] 17198 if v_1_0_1.Op != OpAMD64NEGQ { 17199 break 17200 } 17201 v_1_0_1_0 := v_1_0_1.Args[0] 17202 if v_1_0_1_0.Op != OpAMD64ADDQconst { 17203 break 17204 } 17205 if v_1_0_1_0.AuxInt != -8 { 17206 break 17207 } 17208 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 17209 if v_1_0_1_0_0.Op != OpAMD64ANDQconst { 17210 break 17211 } 17212 if v_1_0_1_0_0.AuxInt != 7 { 17213 break 17214 } 17215 if y != v_1_0_1_0_0.Args[0] { 17216 break 17217 } 17218 v_1_1 := v_1.Args[1] 17219 if v_1_1.Op != OpAMD64SBBLcarrymask { 17220 break 17221 } 17222 v_1_1_0 := v_1_1.Args[0] 17223 if v_1_1_0.Op != OpAMD64CMPQconst { 17224 break 17225 } 17226 if v_1_1_0.AuxInt != 8 { 17227 break 17228 } 17229 v_1_1_0_0 := v_1_1_0.Args[0] 17230 if v_1_1_0_0.Op != OpAMD64NEGQ { 17231 break 17232 } 17233 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 17234 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 17235 break 17236 } 17237 if v_1_1_0_0_0.AuxInt != -8 { 17238 break 17239 } 17240 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 17241 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 17242 break 17243 } 17244 if v_1_1_0_0_0_0.AuxInt != 7 { 17245 break 17246 } 17247 if y != v_1_1_0_0_0_0.Args[0] { 17248 break 17249 } 17250 if !(v.Type.Size() == 1) { 17251 break 17252 } 17253 v.reset(OpAMD64ROLB) 17254 v.AddArg(x) 17255 v.AddArg(y) 17256 return true 17257 } 17258 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))) 17259 // cond: v.Type.Size() == 1 17260 // result: (ROLB x y) 17261 for { 17262 _ = v.Args[1] 17263 v_0 := v.Args[0] 17264 if v_0.Op != OpAMD64SHLL { 17265 break 17266 } 17267 _ = v_0.Args[1] 17268 x := v_0.Args[0] 17269 v_0_1 := v_0.Args[1] 17270 if v_0_1.Op != OpAMD64ANDQconst { 17271 break 17272 } 17273 if v_0_1.AuxInt != 7 { 17274 break 17275 } 17276 y := v_0_1.Args[0] 17277 v_1 := v.Args[1] 17278 if v_1.Op != OpAMD64ANDL { 17279 break 17280 } 17281 _ = v_1.Args[1] 17282 v_1_0 := v_1.Args[0] 17283 if v_1_0.Op != OpAMD64SBBLcarrymask { 17284 break 17285 } 17286 v_1_0_0 := v_1_0.Args[0] 17287 if v_1_0_0.Op != OpAMD64CMPQconst { 17288 break 17289 } 17290 if v_1_0_0.AuxInt != 8 { 17291 break 17292 } 17293 v_1_0_0_0 := v_1_0_0.Args[0] 17294 if v_1_0_0_0.Op != OpAMD64NEGQ { 17295 break 17296 } 17297 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 17298 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 
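// Under the NEGQ an ADDQconst [-8] of the masked count is required:
// -(y&7 - 8) is the (8 - y&7) shift amount of the byte rotate, and
// anything else disqualifies the match.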
17299 break 17300 } 17301 if v_1_0_0_0_0.AuxInt != -8 { 17302 break 17303 } 17304 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 17305 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 17306 break 17307 } 17308 if v_1_0_0_0_0_0.AuxInt != 7 { 17309 break 17310 } 17311 if y != v_1_0_0_0_0_0.Args[0] { 17312 break 17313 } 17314 v_1_1 := v_1.Args[1] 17315 if v_1_1.Op != OpAMD64SHRB { 17316 break 17317 } 17318 _ = v_1_1.Args[1] 17319 if x != v_1_1.Args[0] { 17320 break 17321 } 17322 v_1_1_1 := v_1_1.Args[1] 17323 if v_1_1_1.Op != OpAMD64NEGQ { 17324 break 17325 } 17326 v_1_1_1_0 := v_1_1_1.Args[0] 17327 if v_1_1_1_0.Op != OpAMD64ADDQconst { 17328 break 17329 } 17330 if v_1_1_1_0.AuxInt != -8 { 17331 break 17332 } 17333 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 17334 if v_1_1_1_0_0.Op != OpAMD64ANDQconst { 17335 break 17336 } 17337 if v_1_1_1_0_0.AuxInt != 7 { 17338 break 17339 } 17340 if y != v_1_1_1_0_0.Args[0] { 17341 break 17342 } 17343 if !(v.Type.Size() == 1) { 17344 break 17345 } 17346 v.reset(OpAMD64ROLB) 17347 v.AddArg(x) 17348 v.AddArg(y) 17349 return true 17350 } 17351 // match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7]))) 17352 // cond: v.Type.Size() == 1 17353 // result: (ROLB x y) 17354 for { 17355 _ = v.Args[1] 17356 v_0 := v.Args[0] 17357 if v_0.Op != OpAMD64ANDL { 17358 break 17359 } 17360 _ = v_0.Args[1] 17361 v_0_0 := v_0.Args[0] 17362 if v_0_0.Op != OpAMD64SHRB { 17363 break 17364 } 17365 _ = v_0_0.Args[1] 17366 x := v_0_0.Args[0] 17367 v_0_0_1 := v_0_0.Args[1] 17368 if v_0_0_1.Op != OpAMD64NEGQ { 17369 break 17370 } 17371 v_0_0_1_0 := v_0_0_1.Args[0] 17372 if v_0_0_1_0.Op != OpAMD64ADDQconst { 17373 break 17374 } 17375 if v_0_0_1_0.AuxInt != -8 { 17376 break 17377 } 17378 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 17379 if v_0_0_1_0_0.Op != OpAMD64ANDQconst { 17380 break 17381 } 17382 if v_0_0_1_0_0.AuxInt != 7 { 17383 break 17384 } 17385 y := v_0_0_1_0_0.Args[0] 17386 v_0_1 := v_0.Args[1] 17387 if v_0_1.Op != OpAMD64SBBLcarrymask { 17388 break 17389 } 17390 v_0_1_0 := v_0_1.Args[0] 17391 if v_0_1_0.Op != OpAMD64CMPQconst { 17392 break 17393 } 17394 if v_0_1_0.AuxInt != 8 { 17395 break 17396 } 17397 v_0_1_0_0 := v_0_1_0.Args[0] 17398 if v_0_1_0_0.Op != OpAMD64NEGQ { 17399 break 17400 } 17401 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 17402 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 17403 break 17404 } 17405 if v_0_1_0_0_0.AuxInt != -8 { 17406 break 17407 } 17408 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 17409 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 17410 break 17411 } 17412 if v_0_1_0_0_0_0.AuxInt != 7 { 17413 break 17414 } 17415 if y != v_0_1_0_0_0_0.Args[0] { 17416 break 17417 } 17418 v_1 := v.Args[1] 17419 if v_1.Op != OpAMD64SHLL { 17420 break 17421 } 17422 _ = v_1.Args[1] 17423 if x != v_1.Args[0] { 17424 break 17425 } 17426 v_1_1 := v_1.Args[1] 17427 if v_1_1.Op != OpAMD64ANDQconst { 17428 break 17429 } 17430 if v_1_1.AuxInt != 7 { 17431 break 17432 } 17433 if y != v_1_1.Args[0] { 17434 break 17435 } 17436 if !(v.Type.Size() == 1) { 17437 break 17438 } 17439 v.reset(OpAMD64ROLB) 17440 v.AddArg(x) 17441 v.AddArg(y) 17442 return true 17443 } 17444 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7]))) 17445 // cond: v.Type.Size() == 1 17446 // result: (ROLB x y) 17447 for { 17448 _ = v.Args[1] 17449 v_0 := v.Args[0] 17450 if v_0.Op != OpAMD64ANDL { 17451 break 17452 } 17453 _ 
= v_0.Args[1] 17454 v_0_0 := v_0.Args[0] 17455 if v_0_0.Op != OpAMD64SBBLcarrymask { 17456 break 17457 } 17458 v_0_0_0 := v_0_0.Args[0] 17459 if v_0_0_0.Op != OpAMD64CMPQconst { 17460 break 17461 } 17462 if v_0_0_0.AuxInt != 8 { 17463 break 17464 } 17465 v_0_0_0_0 := v_0_0_0.Args[0] 17466 if v_0_0_0_0.Op != OpAMD64NEGQ { 17467 break 17468 } 17469 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 17470 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 17471 break 17472 } 17473 if v_0_0_0_0_0.AuxInt != -8 { 17474 break 17475 } 17476 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 17477 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 17478 break 17479 } 17480 if v_0_0_0_0_0_0.AuxInt != 7 { 17481 break 17482 } 17483 y := v_0_0_0_0_0_0.Args[0] 17484 v_0_1 := v_0.Args[1] 17485 if v_0_1.Op != OpAMD64SHRB { 17486 break 17487 } 17488 _ = v_0_1.Args[1] 17489 x := v_0_1.Args[0] 17490 v_0_1_1 := v_0_1.Args[1] 17491 if v_0_1_1.Op != OpAMD64NEGQ { 17492 break 17493 } 17494 v_0_1_1_0 := v_0_1_1.Args[0] 17495 if v_0_1_1_0.Op != OpAMD64ADDQconst { 17496 break 17497 } 17498 if v_0_1_1_0.AuxInt != -8 { 17499 break 17500 } 17501 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 17502 if v_0_1_1_0_0.Op != OpAMD64ANDQconst { 17503 break 17504 } 17505 if v_0_1_1_0_0.AuxInt != 7 { 17506 break 17507 } 17508 if y != v_0_1_1_0_0.Args[0] { 17509 break 17510 } 17511 v_1 := v.Args[1] 17512 if v_1.Op != OpAMD64SHLL { 17513 break 17514 } 17515 _ = v_1.Args[1] 17516 if x != v_1.Args[0] { 17517 break 17518 } 17519 v_1_1 := v_1.Args[1] 17520 if v_1_1.Op != OpAMD64ANDQconst { 17521 break 17522 } 17523 if v_1_1.AuxInt != 7 { 17524 break 17525 } 17526 if y != v_1_1.Args[0] { 17527 break 17528 } 17529 if !(v.Type.Size() == 1) { 17530 break 17531 } 17532 v.reset(OpAMD64ROLB) 17533 v.AddArg(x) 17534 v.AddArg(y) 17535 return true 17536 } 17537 return false 17538 } 17539 func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool { 17540 b := v.Block 17541 _ = b 17542 typ := &b.Func.Config.Types 17543 _ = typ 17544 // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])))) 17545 // cond: v.Type.Size() == 1 17546 // result: (ROLB x y) 17547 for { 17548 _ = v.Args[1] 17549 v_0 := v.Args[0] 17550 if v_0.Op != OpAMD64SHLL { 17551 break 17552 } 17553 _ = v_0.Args[1] 17554 x := v_0.Args[0] 17555 v_0_1 := v_0.Args[1] 17556 if v_0_1.Op != OpAMD64ANDLconst { 17557 break 17558 } 17559 if v_0_1.AuxInt != 7 { 17560 break 17561 } 17562 y := v_0_1.Args[0] 17563 v_1 := v.Args[1] 17564 if v_1.Op != OpAMD64ANDL { 17565 break 17566 } 17567 _ = v_1.Args[1] 17568 v_1_0 := v_1.Args[0] 17569 if v_1_0.Op != OpAMD64SHRB { 17570 break 17571 } 17572 _ = v_1_0.Args[1] 17573 if x != v_1_0.Args[0] { 17574 break 17575 } 17576 v_1_0_1 := v_1_0.Args[1] 17577 if v_1_0_1.Op != OpAMD64NEGL { 17578 break 17579 } 17580 v_1_0_1_0 := v_1_0_1.Args[0] 17581 if v_1_0_1_0.Op != OpAMD64ADDLconst { 17582 break 17583 } 17584 if v_1_0_1_0.AuxInt != -8 { 17585 break 17586 } 17587 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 17588 if v_1_0_1_0_0.Op != OpAMD64ANDLconst { 17589 break 17590 } 17591 if v_1_0_1_0_0.AuxInt != 7 { 17592 break 17593 } 17594 if y != v_1_0_1_0_0.Args[0] { 17595 break 17596 } 17597 v_1_1 := v_1.Args[1] 17598 if v_1_1.Op != OpAMD64SBBLcarrymask { 17599 break 17600 } 17601 v_1_1_0 := v_1_1.Args[0] 17602 if v_1_1_0.Op != OpAMD64CMPLconst { 17603 break 17604 } 17605 if v_1_1_0.AuxInt != 8 { 17606 break 17607 } 17608 v_1_1_0_0 := v_1_1_0.Args[0] 17609 if v_1_1_0_0.Op != OpAMD64NEGL { 17610 break 17611 } 17612 v_1_1_0_0_0 
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
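	// Editor's note (added): the next four cases are the mirror image, a
	// right rotate: roughly x>>(y&7) | x<<(8-y&7) on a uint8, in both Q-
	// and L-register forms and both ORL operand orders. There is no
	// carry-mask factor here, presumably because when y&7 == 0 the SHLL
	// shifts by 8 and the stray bits land above the low byte, which is all
	// that survives under the v.Type.Size() == 1 condition.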
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
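	// Editor's note (added): rotates end here. (ORL x x) is idempotent and
	// folds to a copy of x. The MOVBload case that follows is the first of
	// a long family of load-combining rules: two byte loads at offsets i0
	// and i0+1 off the same pointer, with the higher byte shifted left by
	// 8, are a single little-endian 16-bit load. For example, the standard
	// idiom
	//
	//	v := uint16(b[0]) | uint16(b[1])<<8
	//
	// becomes one MOVWload. The Uses == 1 checks and clobber(...) calls
	// ensure the partial loads and the shift have no other consumers and
	// can be discarded, and mergePoint(b, x0, x1) picks a block where the
	// merged load can legally be placed (nil means there is none, so no
	// rewrite).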
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
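// Editor's note (added): ORL_50 carries the merges on: first the commuted
// form of the 8-to-16-bit rule, then the analogous 16-to-32-bit pair
// (two MOVWloads at i0 and i0+2, the high half shifted by 16, become one
// MOVLload), in both operand orders.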
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
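	// Editor's note (added): the next four cases handle byte pairs buried
	// in a longer OR chain. s1 and s0 are byte loads shifted into the two
	// halves of one aligned 16-bit lane (j1 == j0+8, j0 % 16 == 0) inside
	// (ORL ... y); they fuse into a single MOVWload shifted by j0, with
	// the rest of the chain y reattached, roughly
	//
	//	... | uint32(b[i0])<<j0 | uint32(b[i0+1])<<(j0+8)
	//		-> ... | uint32(load16(b[i0:]))<<j0
	//
	// (sketch only; load16 stands for the merged MOVWload). The four cases
	// cover the operand orders of the two commutative ORLs.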
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
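	// Editor's note (added): from here to the end of ORL_50 (and on
	// through ORL_60) the same 8-to-16-bit merge is restated for the
	// indexed loads. A MOVBloadidx1 addresses p+idx and its two address
	// operands commute, so the generator emits a copy of the rule for
	// every p/idx order in x0 and x1 as well as both ORL operand orders;
	// the cases below differ only in those argument positions.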
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
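// Editor's note (added): ORL_60 continues the permutation sweep of the
// indexed byte-pair merge before moving on to the indexed 16-bit pairs.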
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
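	// Editor's note (added): the MOVBloadidx1 permutations end here. Next
	// come the 16-bit indexed pairs: two adjacent MOVWloadidx1 loads
	// (i1 == i0+2), the high half shifted left by 16, merge into one
	// MOVLloadidx1, the indexed analogue of the MOVLload rule in ORL_50,
	// again once per operand order.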
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
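// Editor's note (added): ORL_70 finishes the MOVWloadidx1 operand
// permutations and then begins the indexed version of the shifted
// byte-pair chain rules.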
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
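	// Editor's note (added): as in ORL_50, a pair of byte loads shifted
	// into one 16-bit lane of a larger OR chain is fused, now through
	// MOVWloadidx1; the chain remainder y is preserved, and each p/idx
	// order combined with each (ORL s0 y)/(ORL y s0) order gets its own
	// case.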
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
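// Editor's note (added): ORL_80 finishes this sweep: one last (ORL y s0)
// ordering, and then the variants that match the inner ORL as v.Args[0]
// with the remaining shifted byte load as v.Args[1].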
break 19865 } 19866 j0 := s0.AuxInt 19867 x0 := s0.Args[0] 19868 if x0.Op != OpAMD64MOVBloadidx1 { 19869 break 19870 } 19871 i0 := x0.AuxInt 19872 if x0.Aux != s { 19873 break 19874 } 19875 _ = x0.Args[2] 19876 if p != x0.Args[0] { 19877 break 19878 } 19879 if idx != x0.Args[1] { 19880 break 19881 } 19882 if mem != x0.Args[2] { 19883 break 19884 } 19885 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19886 break 19887 } 19888 b = mergePoint(b, x0, x1) 19889 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19890 v.reset(OpCopy) 19891 v.AddArg(v0) 19892 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19893 v1.AuxInt = j0 19894 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19895 v2.AuxInt = i0 19896 v2.Aux = s 19897 v2.AddArg(p) 19898 v2.AddArg(idx) 19899 v2.AddArg(mem) 19900 v1.AddArg(v2) 19901 v0.AddArg(v1) 19902 v0.AddArg(y) 19903 return true 19904 } 19905 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 19906 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19907 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19908 for { 19909 _ = v.Args[1] 19910 s1 := v.Args[0] 19911 if s1.Op != OpAMD64SHLLconst { 19912 break 19913 } 19914 j1 := s1.AuxInt 19915 x1 := s1.Args[0] 19916 if x1.Op != OpAMD64MOVBloadidx1 { 19917 break 19918 } 19919 i1 := x1.AuxInt 19920 s := x1.Aux 19921 _ = x1.Args[2] 19922 p := x1.Args[0] 19923 idx := x1.Args[1] 19924 mem := x1.Args[2] 19925 or := v.Args[1] 19926 if or.Op != OpAMD64ORL { 19927 break 19928 } 19929 _ = or.Args[1] 19930 y := or.Args[0] 19931 s0 := or.Args[1] 19932 if s0.Op != OpAMD64SHLLconst { 19933 break 19934 } 19935 j0 := s0.AuxInt 19936 x0 := s0.Args[0] 19937 if x0.Op != OpAMD64MOVBloadidx1 { 19938 break 19939 } 19940 i0 := x0.AuxInt 19941 if x0.Aux != s { 19942 break 19943 } 19944 _ = x0.Args[2] 19945 if idx != x0.Args[0] { 19946 break 19947 } 19948 if p != x0.Args[1] { 19949 break 19950 } 19951 if mem != x0.Args[2] { 19952 break 19953 } 19954 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19955 break 19956 } 19957 b = mergePoint(b, x0, x1) 19958 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19959 v.reset(OpCopy) 19960 v.AddArg(v0) 19961 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19962 v1.AuxInt = j0 19963 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19964 v2.AuxInt = i0 19965 v2.Aux = s 19966 v2.AddArg(p) 19967 v2.AddArg(idx) 19968 v2.AddArg(mem) 19969 v1.AddArg(v2) 19970 v0.AddArg(v1) 19971 v0.AddArg(y) 19972 return true 19973 } 19974 return false 19975 } 19976 func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool { 19977 b := v.Block 19978 _ = b 19979 typ := &b.Func.Config.Types 19980 _ = typ 19981 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 19982 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 
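// The ORL rules above and below cover, one commuted operand order at a
// time, a single little-endian combine: two byte loads from adjacent
// offsets (i0 and i0+1), shifted into adjacent byte lanes of a wider OR
// chain (j0 and j0+8, with j0%16 == 0), fold into one 16-bit load shifted
// by j0. As a rough sketch (variable names hypothetical), assuming bounds
// checks were already eliminated, source like
//
//	u |= uint32(b[i])<<16 | uint32(b[i+1])<<24
//
// can be matched here and lower to a single MOVWloadidx1.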
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
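	// The rules from here on reverse the byte order: the byte at the lower
	// address i0 is the high byte, so the adjacent pair folds into a plain
	// 16-bit load followed by ROLWconst [8], which byte-swaps a 16-bit
	// value. Roughly, big-endian-style source such as
	//
	//	u := uint16(b[i])<<8 | uint16(b[i+1])
	//
	// can lower to MOVWload plus one rotate.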
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
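	// Two byte-swapped 16-bit halves combine once more: when the half from
	// the lower address i0 is shifted left by 16, the pair becomes a single
	// 32-bit load followed by BSWAPL, the shape produced by a big-endian
	// 32-bit read in the spirit of binary.BigEndian.Uint32(b[i:]).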
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
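	// Next come the shifted variants: a byte-swapped pair that sits at bit
	// offset j1 inside a wider OR chain (j1 == j0-8, j1%16 == 0) collapses
	// into SHLLconst [j1] of the rotated 16-bit load, leaving the rest of
	// the chain (y) untouched.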
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
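	// The 16-bit byte-swap combine now repeats for indexed loads: adjacent
	// MOVBloadidx1 pairs become ROLWconst [8] of one MOVWloadidx1, with a
	// near-identical rule for each commuted order of the ORL operands and
	// of the p/idx address arguments.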
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
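	// And its 32-bit counterpart for indexed loads: two byte-swapped 16-bit
	// indexed loads two bytes apart merge into BSWAPL of one MOVLloadidx1,
	// again enumerated over the p/idx commutations.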
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
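// rewriteValueAMD64_OpAMD64ORL_110 picks up the remaining
// BSWAPL/MOVLloadidx1 commutations; the generator appears to emit one
// function per block of ten rules, hence the _0, _10, ... suffixes.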
(MOVLloadidx1 [i0] {s} p idx mem)) 21934 for { 21935 _ = v.Args[1] 21936 sh := v.Args[0] 21937 if sh.Op != OpAMD64SHLLconst { 21938 break 21939 } 21940 if sh.AuxInt != 16 { 21941 break 21942 } 21943 r0 := sh.Args[0] 21944 if r0.Op != OpAMD64ROLWconst { 21945 break 21946 } 21947 if r0.AuxInt != 8 { 21948 break 21949 } 21950 x0 := r0.Args[0] 21951 if x0.Op != OpAMD64MOVWloadidx1 { 21952 break 21953 } 21954 i0 := x0.AuxInt 21955 s := x0.Aux 21956 _ = x0.Args[2] 21957 idx := x0.Args[0] 21958 p := x0.Args[1] 21959 mem := x0.Args[2] 21960 r1 := v.Args[1] 21961 if r1.Op != OpAMD64ROLWconst { 21962 break 21963 } 21964 if r1.AuxInt != 8 { 21965 break 21966 } 21967 x1 := r1.Args[0] 21968 if x1.Op != OpAMD64MOVWloadidx1 { 21969 break 21970 } 21971 i1 := x1.AuxInt 21972 if x1.Aux != s { 21973 break 21974 } 21975 _ = x1.Args[2] 21976 if p != x1.Args[0] { 21977 break 21978 } 21979 if idx != x1.Args[1] { 21980 break 21981 } 21982 if mem != x1.Args[2] { 21983 break 21984 } 21985 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21986 break 21987 } 21988 b = mergePoint(b, x0, x1) 21989 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21990 v.reset(OpCopy) 21991 v.AddArg(v0) 21992 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21993 v1.AuxInt = i0 21994 v1.Aux = s 21995 v1.AddArg(p) 21996 v1.AddArg(idx) 21997 v1.AddArg(mem) 21998 v0.AddArg(v1) 21999 return true 22000 } 22001 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 22002 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 22003 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 22004 for { 22005 _ = v.Args[1] 22006 sh := v.Args[0] 22007 if sh.Op != OpAMD64SHLLconst { 22008 break 22009 } 22010 if sh.AuxInt != 16 { 22011 break 22012 } 22013 r0 := sh.Args[0] 22014 if r0.Op != OpAMD64ROLWconst { 22015 break 22016 } 22017 if r0.AuxInt != 8 { 22018 break 22019 } 22020 x0 := r0.Args[0] 22021 if x0.Op != OpAMD64MOVWloadidx1 { 22022 break 22023 } 22024 i0 := x0.AuxInt 22025 s := x0.Aux 22026 _ = x0.Args[2] 22027 p := x0.Args[0] 22028 idx := x0.Args[1] 22029 mem := x0.Args[2] 22030 r1 := v.Args[1] 22031 if r1.Op != OpAMD64ROLWconst { 22032 break 22033 } 22034 if r1.AuxInt != 8 { 22035 break 22036 } 22037 x1 := r1.Args[0] 22038 if x1.Op != OpAMD64MOVWloadidx1 { 22039 break 22040 } 22041 i1 := x1.AuxInt 22042 if x1.Aux != s { 22043 break 22044 } 22045 _ = x1.Args[2] 22046 if idx != x1.Args[0] { 22047 break 22048 } 22049 if p != x1.Args[1] { 22050 break 22051 } 22052 if mem != x1.Args[2] { 22053 break 22054 } 22055 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 22056 break 22057 } 22058 b = mergePoint(b, x0, x1) 22059 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 22060 v.reset(OpCopy) 22061 v.AddArg(v0) 22062 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 22063 v1.AuxInt = i0 22064 v1.Aux = s 22065 v1.AddArg(p) 22066 v1.AddArg(idx) 22067 v1.AddArg(mem) 22068 v0.AddArg(v1) 22069 return true 22070 } 22071 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst 
[8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 22072 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 22073 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 22074 for { 22075 _ = v.Args[1] 22076 sh := v.Args[0] 22077 if sh.Op != OpAMD64SHLLconst { 22078 break 22079 } 22080 if sh.AuxInt != 16 { 22081 break 22082 } 22083 r0 := sh.Args[0] 22084 if r0.Op != OpAMD64ROLWconst { 22085 break 22086 } 22087 if r0.AuxInt != 8 { 22088 break 22089 } 22090 x0 := r0.Args[0] 22091 if x0.Op != OpAMD64MOVWloadidx1 { 22092 break 22093 } 22094 i0 := x0.AuxInt 22095 s := x0.Aux 22096 _ = x0.Args[2] 22097 idx := x0.Args[0] 22098 p := x0.Args[1] 22099 mem := x0.Args[2] 22100 r1 := v.Args[1] 22101 if r1.Op != OpAMD64ROLWconst { 22102 break 22103 } 22104 if r1.AuxInt != 8 { 22105 break 22106 } 22107 x1 := r1.Args[0] 22108 if x1.Op != OpAMD64MOVWloadidx1 { 22109 break 22110 } 22111 i1 := x1.AuxInt 22112 if x1.Aux != s { 22113 break 22114 } 22115 _ = x1.Args[2] 22116 if idx != x1.Args[0] { 22117 break 22118 } 22119 if p != x1.Args[1] { 22120 break 22121 } 22122 if mem != x1.Args[2] { 22123 break 22124 } 22125 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 22126 break 22127 } 22128 b = mergePoint(b, x0, x1) 22129 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 22130 v.reset(OpCopy) 22131 v.AddArg(v0) 22132 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 22133 v1.AuxInt = i0 22134 v1.Aux = s 22135 v1.AddArg(p) 22136 v1.AddArg(idx) 22137 v1.AddArg(mem) 22138 v0.AddArg(v1) 22139 return true 22140 } 22141 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 22142 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22143 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22144 for { 22145 _ = v.Args[1] 22146 s0 := v.Args[0] 22147 if s0.Op != OpAMD64SHLLconst { 22148 break 22149 } 22150 j0 := s0.AuxInt 22151 x0 := s0.Args[0] 22152 if x0.Op != OpAMD64MOVBloadidx1 { 22153 break 22154 } 22155 i0 := x0.AuxInt 22156 s := x0.Aux 22157 _ = x0.Args[2] 22158 p := x0.Args[0] 22159 idx := x0.Args[1] 22160 mem := x0.Args[2] 22161 or := v.Args[1] 22162 if or.Op != OpAMD64ORL { 22163 break 22164 } 22165 _ = or.Args[1] 22166 s1 := or.Args[0] 22167 if s1.Op != OpAMD64SHLLconst { 22168 break 22169 } 22170 j1 := s1.AuxInt 22171 x1 := s1.Args[0] 22172 if x1.Op != OpAMD64MOVBloadidx1 { 22173 break 22174 } 22175 i1 := x1.AuxInt 22176 if x1.Aux != s { 22177 break 22178 } 22179 _ = x1.Args[2] 22180 if p != x1.Args[0] { 22181 break 22182 } 22183 if idx != x1.Args[1] { 22184 break 22185 } 22186 if mem != x1.Args[2] { 22187 break 22188 } 22189 y := or.Args[1] 22190 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) 
{ 22191 break 22192 } 22193 b = mergePoint(b, x0, x1) 22194 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22195 v.reset(OpCopy) 22196 v.AddArg(v0) 22197 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22198 v1.AuxInt = j1 22199 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22200 v2.AuxInt = 8 22201 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22202 v3.AuxInt = i0 22203 v3.Aux = s 22204 v3.AddArg(p) 22205 v3.AddArg(idx) 22206 v3.AddArg(mem) 22207 v2.AddArg(v3) 22208 v1.AddArg(v2) 22209 v0.AddArg(v1) 22210 v0.AddArg(y) 22211 return true 22212 } 22213 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 22214 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22215 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22216 for { 22217 _ = v.Args[1] 22218 s0 := v.Args[0] 22219 if s0.Op != OpAMD64SHLLconst { 22220 break 22221 } 22222 j0 := s0.AuxInt 22223 x0 := s0.Args[0] 22224 if x0.Op != OpAMD64MOVBloadidx1 { 22225 break 22226 } 22227 i0 := x0.AuxInt 22228 s := x0.Aux 22229 _ = x0.Args[2] 22230 idx := x0.Args[0] 22231 p := x0.Args[1] 22232 mem := x0.Args[2] 22233 or := v.Args[1] 22234 if or.Op != OpAMD64ORL { 22235 break 22236 } 22237 _ = or.Args[1] 22238 s1 := or.Args[0] 22239 if s1.Op != OpAMD64SHLLconst { 22240 break 22241 } 22242 j1 := s1.AuxInt 22243 x1 := s1.Args[0] 22244 if x1.Op != OpAMD64MOVBloadidx1 { 22245 break 22246 } 22247 i1 := x1.AuxInt 22248 if x1.Aux != s { 22249 break 22250 } 22251 _ = x1.Args[2] 22252 if p != x1.Args[0] { 22253 break 22254 } 22255 if idx != x1.Args[1] { 22256 break 22257 } 22258 if mem != x1.Args[2] { 22259 break 22260 } 22261 y := or.Args[1] 22262 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22263 break 22264 } 22265 b = mergePoint(b, x0, x1) 22266 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22267 v.reset(OpCopy) 22268 v.AddArg(v0) 22269 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22270 v1.AuxInt = j1 22271 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22272 v2.AuxInt = 8 22273 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22274 v3.AuxInt = i0 22275 v3.Aux = s 22276 v3.AddArg(p) 22277 v3.AddArg(idx) 22278 v3.AddArg(mem) 22279 v2.AddArg(v3) 22280 v1.AddArg(v2) 22281 v0.AddArg(v1) 22282 v0.AddArg(y) 22283 return true 22284 } 22285 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 22286 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22287 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22288 for { 22289 _ = v.Args[1] 22290 s0 := v.Args[0] 22291 if s0.Op != OpAMD64SHLLconst { 22292 break 22293 } 22294 j0 := s0.AuxInt 22295 x0 := s0.Args[0] 22296 if x0.Op != OpAMD64MOVBloadidx1 { 22297 break 22298 } 22299 i0 := 
x0.AuxInt 22300 s := x0.Aux 22301 _ = x0.Args[2] 22302 p := x0.Args[0] 22303 idx := x0.Args[1] 22304 mem := x0.Args[2] 22305 or := v.Args[1] 22306 if or.Op != OpAMD64ORL { 22307 break 22308 } 22309 _ = or.Args[1] 22310 s1 := or.Args[0] 22311 if s1.Op != OpAMD64SHLLconst { 22312 break 22313 } 22314 j1 := s1.AuxInt 22315 x1 := s1.Args[0] 22316 if x1.Op != OpAMD64MOVBloadidx1 { 22317 break 22318 } 22319 i1 := x1.AuxInt 22320 if x1.Aux != s { 22321 break 22322 } 22323 _ = x1.Args[2] 22324 if idx != x1.Args[0] { 22325 break 22326 } 22327 if p != x1.Args[1] { 22328 break 22329 } 22330 if mem != x1.Args[2] { 22331 break 22332 } 22333 y := or.Args[1] 22334 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22335 break 22336 } 22337 b = mergePoint(b, x0, x1) 22338 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22339 v.reset(OpCopy) 22340 v.AddArg(v0) 22341 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22342 v1.AuxInt = j1 22343 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22344 v2.AuxInt = 8 22345 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22346 v3.AuxInt = i0 22347 v3.Aux = s 22348 v3.AddArg(p) 22349 v3.AddArg(idx) 22350 v3.AddArg(mem) 22351 v2.AddArg(v3) 22352 v1.AddArg(v2) 22353 v0.AddArg(v1) 22354 v0.AddArg(y) 22355 return true 22356 } 22357 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 22358 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22359 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22360 for { 22361 _ = v.Args[1] 22362 s0 := v.Args[0] 22363 if s0.Op != OpAMD64SHLLconst { 22364 break 22365 } 22366 j0 := s0.AuxInt 22367 x0 := s0.Args[0] 22368 if x0.Op != OpAMD64MOVBloadidx1 { 22369 break 22370 } 22371 i0 := x0.AuxInt 22372 s := x0.Aux 22373 _ = x0.Args[2] 22374 idx := x0.Args[0] 22375 p := x0.Args[1] 22376 mem := x0.Args[2] 22377 or := v.Args[1] 22378 if or.Op != OpAMD64ORL { 22379 break 22380 } 22381 _ = or.Args[1] 22382 s1 := or.Args[0] 22383 if s1.Op != OpAMD64SHLLconst { 22384 break 22385 } 22386 j1 := s1.AuxInt 22387 x1 := s1.Args[0] 22388 if x1.Op != OpAMD64MOVBloadidx1 { 22389 break 22390 } 22391 i1 := x1.AuxInt 22392 if x1.Aux != s { 22393 break 22394 } 22395 _ = x1.Args[2] 22396 if idx != x1.Args[0] { 22397 break 22398 } 22399 if p != x1.Args[1] { 22400 break 22401 } 22402 if mem != x1.Args[2] { 22403 break 22404 } 22405 y := or.Args[1] 22406 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22407 break 22408 } 22409 b = mergePoint(b, x0, x1) 22410 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22411 v.reset(OpCopy) 22412 v.AddArg(v0) 22413 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22414 v1.AuxInt = j1 22415 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22416 v2.AuxInt = 8 22417 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22418 v3.AuxInt = i0 22419 v3.Aux = s 22420 v3.AddArg(p) 22421 
v3.AddArg(idx) 22422 v3.AddArg(mem) 22423 v2.AddArg(v3) 22424 v1.AddArg(v2) 22425 v0.AddArg(v1) 22426 v0.AddArg(y) 22427 return true 22428 } 22429 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 22430 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22431 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22432 for { 22433 _ = v.Args[1] 22434 s0 := v.Args[0] 22435 if s0.Op != OpAMD64SHLLconst { 22436 break 22437 } 22438 j0 := s0.AuxInt 22439 x0 := s0.Args[0] 22440 if x0.Op != OpAMD64MOVBloadidx1 { 22441 break 22442 } 22443 i0 := x0.AuxInt 22444 s := x0.Aux 22445 _ = x0.Args[2] 22446 p := x0.Args[0] 22447 idx := x0.Args[1] 22448 mem := x0.Args[2] 22449 or := v.Args[1] 22450 if or.Op != OpAMD64ORL { 22451 break 22452 } 22453 _ = or.Args[1] 22454 y := or.Args[0] 22455 s1 := or.Args[1] 22456 if s1.Op != OpAMD64SHLLconst { 22457 break 22458 } 22459 j1 := s1.AuxInt 22460 x1 := s1.Args[0] 22461 if x1.Op != OpAMD64MOVBloadidx1 { 22462 break 22463 } 22464 i1 := x1.AuxInt 22465 if x1.Aux != s { 22466 break 22467 } 22468 _ = x1.Args[2] 22469 if p != x1.Args[0] { 22470 break 22471 } 22472 if idx != x1.Args[1] { 22473 break 22474 } 22475 if mem != x1.Args[2] { 22476 break 22477 } 22478 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22479 break 22480 } 22481 b = mergePoint(b, x0, x1) 22482 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22483 v.reset(OpCopy) 22484 v.AddArg(v0) 22485 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22486 v1.AuxInt = j1 22487 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22488 v2.AuxInt = 8 22489 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22490 v3.AuxInt = i0 22491 v3.Aux = s 22492 v3.AddArg(p) 22493 v3.AddArg(idx) 22494 v3.AddArg(mem) 22495 v2.AddArg(v3) 22496 v1.AddArg(v2) 22497 v0.AddArg(v1) 22498 v0.AddArg(y) 22499 return true 22500 } 22501 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 22502 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22503 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22504 for { 22505 _ = v.Args[1] 22506 s0 := v.Args[0] 22507 if s0.Op != OpAMD64SHLLconst { 22508 break 22509 } 22510 j0 := s0.AuxInt 22511 x0 := s0.Args[0] 22512 if x0.Op != OpAMD64MOVBloadidx1 { 22513 break 22514 } 22515 i0 := x0.AuxInt 22516 s := x0.Aux 22517 _ = x0.Args[2] 22518 idx := x0.Args[0] 22519 p := x0.Args[1] 22520 mem := x0.Args[2] 22521 or := v.Args[1] 22522 if or.Op != OpAMD64ORL { 22523 break 22524 } 22525 _ = or.Args[1] 22526 y := or.Args[0] 22527 s1 := or.Args[1] 22528 if s1.Op != OpAMD64SHLLconst { 22529 break 22530 } 22531 j1 := s1.AuxInt 22532 x1 := s1.Args[0] 22533 if x1.Op != OpAMD64MOVBloadidx1 { 22534 break 22535 } 22536 i1 := 
x1.AuxInt 22537 if x1.Aux != s { 22538 break 22539 } 22540 _ = x1.Args[2] 22541 if p != x1.Args[0] { 22542 break 22543 } 22544 if idx != x1.Args[1] { 22545 break 22546 } 22547 if mem != x1.Args[2] { 22548 break 22549 } 22550 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22551 break 22552 } 22553 b = mergePoint(b, x0, x1) 22554 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22555 v.reset(OpCopy) 22556 v.AddArg(v0) 22557 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22558 v1.AuxInt = j1 22559 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22560 v2.AuxInt = 8 22561 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22562 v3.AuxInt = i0 22563 v3.Aux = s 22564 v3.AddArg(p) 22565 v3.AddArg(idx) 22566 v3.AddArg(mem) 22567 v2.AddArg(v3) 22568 v1.AddArg(v2) 22569 v0.AddArg(v1) 22570 v0.AddArg(y) 22571 return true 22572 } 22573 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 22574 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22575 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22576 for { 22577 _ = v.Args[1] 22578 s0 := v.Args[0] 22579 if s0.Op != OpAMD64SHLLconst { 22580 break 22581 } 22582 j0 := s0.AuxInt 22583 x0 := s0.Args[0] 22584 if x0.Op != OpAMD64MOVBloadidx1 { 22585 break 22586 } 22587 i0 := x0.AuxInt 22588 s := x0.Aux 22589 _ = x0.Args[2] 22590 p := x0.Args[0] 22591 idx := x0.Args[1] 22592 mem := x0.Args[2] 22593 or := v.Args[1] 22594 if or.Op != OpAMD64ORL { 22595 break 22596 } 22597 _ = or.Args[1] 22598 y := or.Args[0] 22599 s1 := or.Args[1] 22600 if s1.Op != OpAMD64SHLLconst { 22601 break 22602 } 22603 j1 := s1.AuxInt 22604 x1 := s1.Args[0] 22605 if x1.Op != OpAMD64MOVBloadidx1 { 22606 break 22607 } 22608 i1 := x1.AuxInt 22609 if x1.Aux != s { 22610 break 22611 } 22612 _ = x1.Args[2] 22613 if idx != x1.Args[0] { 22614 break 22615 } 22616 if p != x1.Args[1] { 22617 break 22618 } 22619 if mem != x1.Args[2] { 22620 break 22621 } 22622 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22623 break 22624 } 22625 b = mergePoint(b, x0, x1) 22626 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22627 v.reset(OpCopy) 22628 v.AddArg(v0) 22629 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22630 v1.AuxInt = j1 22631 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22632 v2.AuxInt = 8 22633 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22634 v3.AuxInt = i0 22635 v3.Aux = s 22636 v3.AddArg(p) 22637 v3.AddArg(idx) 22638 v3.AddArg(mem) 22639 v2.AddArg(v3) 22640 v1.AddArg(v2) 22641 v0.AddArg(v1) 22642 v0.AddArg(y) 22643 return true 22644 } 22645 return false 22646 } 22647 func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool { 22648 b := v.Block 22649 _ = b 22650 typ := &b.Func.Config.Types 22651 _ = typ 22652 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} 
idx p mem)))) 22653 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22654 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22655 for { 22656 _ = v.Args[1] 22657 s0 := v.Args[0] 22658 if s0.Op != OpAMD64SHLLconst { 22659 break 22660 } 22661 j0 := s0.AuxInt 22662 x0 := s0.Args[0] 22663 if x0.Op != OpAMD64MOVBloadidx1 { 22664 break 22665 } 22666 i0 := x0.AuxInt 22667 s := x0.Aux 22668 _ = x0.Args[2] 22669 idx := x0.Args[0] 22670 p := x0.Args[1] 22671 mem := x0.Args[2] 22672 or := v.Args[1] 22673 if or.Op != OpAMD64ORL { 22674 break 22675 } 22676 _ = or.Args[1] 22677 y := or.Args[0] 22678 s1 := or.Args[1] 22679 if s1.Op != OpAMD64SHLLconst { 22680 break 22681 } 22682 j1 := s1.AuxInt 22683 x1 := s1.Args[0] 22684 if x1.Op != OpAMD64MOVBloadidx1 { 22685 break 22686 } 22687 i1 := x1.AuxInt 22688 if x1.Aux != s { 22689 break 22690 } 22691 _ = x1.Args[2] 22692 if idx != x1.Args[0] { 22693 break 22694 } 22695 if p != x1.Args[1] { 22696 break 22697 } 22698 if mem != x1.Args[2] { 22699 break 22700 } 22701 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22702 break 22703 } 22704 b = mergePoint(b, x0, x1) 22705 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22706 v.reset(OpCopy) 22707 v.AddArg(v0) 22708 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22709 v1.AuxInt = j1 22710 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22711 v2.AuxInt = 8 22712 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22713 v3.AuxInt = i0 22714 v3.Aux = s 22715 v3.AddArg(p) 22716 v3.AddArg(idx) 22717 v3.AddArg(mem) 22718 v2.AddArg(v3) 22719 v1.AddArg(v2) 22720 v0.AddArg(v1) 22721 v0.AddArg(y) 22722 return true 22723 } 22724 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22725 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22726 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22727 for { 22728 _ = v.Args[1] 22729 or := v.Args[0] 22730 if or.Op != OpAMD64ORL { 22731 break 22732 } 22733 _ = or.Args[1] 22734 s1 := or.Args[0] 22735 if s1.Op != OpAMD64SHLLconst { 22736 break 22737 } 22738 j1 := s1.AuxInt 22739 x1 := s1.Args[0] 22740 if x1.Op != OpAMD64MOVBloadidx1 { 22741 break 22742 } 22743 i1 := x1.AuxInt 22744 s := x1.Aux 22745 _ = x1.Args[2] 22746 p := x1.Args[0] 22747 idx := x1.Args[1] 22748 mem := x1.Args[2] 22749 y := or.Args[1] 22750 s0 := v.Args[1] 22751 if s0.Op != OpAMD64SHLLconst { 22752 break 22753 } 22754 j0 := s0.AuxInt 22755 x0 := s0.Args[0] 22756 if x0.Op != OpAMD64MOVBloadidx1 { 22757 break 22758 } 22759 i0 := x0.AuxInt 22760 if x0.Aux != s { 22761 break 22762 } 22763 _ = x0.Args[2] 22764 if p != x0.Args[0] { 22765 break 22766 } 22767 if idx != x0.Args[1] { 22768 break 22769 } 22770 if mem != x0.Args[2] { 22771 break 22772 } 22773 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && 
x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22774 break 22775 } 22776 b = mergePoint(b, x0, x1) 22777 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22778 v.reset(OpCopy) 22779 v.AddArg(v0) 22780 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22781 v1.AuxInt = j1 22782 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22783 v2.AuxInt = 8 22784 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22785 v3.AuxInt = i0 22786 v3.Aux = s 22787 v3.AddArg(p) 22788 v3.AddArg(idx) 22789 v3.AddArg(mem) 22790 v2.AddArg(v3) 22791 v1.AddArg(v2) 22792 v0.AddArg(v1) 22793 v0.AddArg(y) 22794 return true 22795 } 22796 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22797 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22798 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22799 for { 22800 _ = v.Args[1] 22801 or := v.Args[0] 22802 if or.Op != OpAMD64ORL { 22803 break 22804 } 22805 _ = or.Args[1] 22806 s1 := or.Args[0] 22807 if s1.Op != OpAMD64SHLLconst { 22808 break 22809 } 22810 j1 := s1.AuxInt 22811 x1 := s1.Args[0] 22812 if x1.Op != OpAMD64MOVBloadidx1 { 22813 break 22814 } 22815 i1 := x1.AuxInt 22816 s := x1.Aux 22817 _ = x1.Args[2] 22818 idx := x1.Args[0] 22819 p := x1.Args[1] 22820 mem := x1.Args[2] 22821 y := or.Args[1] 22822 s0 := v.Args[1] 22823 if s0.Op != OpAMD64SHLLconst { 22824 break 22825 } 22826 j0 := s0.AuxInt 22827 x0 := s0.Args[0] 22828 if x0.Op != OpAMD64MOVBloadidx1 { 22829 break 22830 } 22831 i0 := x0.AuxInt 22832 if x0.Aux != s { 22833 break 22834 } 22835 _ = x0.Args[2] 22836 if p != x0.Args[0] { 22837 break 22838 } 22839 if idx != x0.Args[1] { 22840 break 22841 } 22842 if mem != x0.Args[2] { 22843 break 22844 } 22845 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22846 break 22847 } 22848 b = mergePoint(b, x0, x1) 22849 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22850 v.reset(OpCopy) 22851 v.AddArg(v0) 22852 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22853 v1.AuxInt = j1 22854 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22855 v2.AuxInt = 8 22856 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22857 v3.AuxInt = i0 22858 v3.Aux = s 22859 v3.AddArg(p) 22860 v3.AddArg(idx) 22861 v3.AddArg(mem) 22862 v2.AddArg(v3) 22863 v1.AddArg(v2) 22864 v0.AddArg(v1) 22865 v0.AddArg(y) 22866 return true 22867 } 22868 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22869 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22870 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22871 for { 22872 _ = v.Args[1] 22873 or 
:= v.Args[0] 22874 if or.Op != OpAMD64ORL { 22875 break 22876 } 22877 _ = or.Args[1] 22878 y := or.Args[0] 22879 s1 := or.Args[1] 22880 if s1.Op != OpAMD64SHLLconst { 22881 break 22882 } 22883 j1 := s1.AuxInt 22884 x1 := s1.Args[0] 22885 if x1.Op != OpAMD64MOVBloadidx1 { 22886 break 22887 } 22888 i1 := x1.AuxInt 22889 s := x1.Aux 22890 _ = x1.Args[2] 22891 p := x1.Args[0] 22892 idx := x1.Args[1] 22893 mem := x1.Args[2] 22894 s0 := v.Args[1] 22895 if s0.Op != OpAMD64SHLLconst { 22896 break 22897 } 22898 j0 := s0.AuxInt 22899 x0 := s0.Args[0] 22900 if x0.Op != OpAMD64MOVBloadidx1 { 22901 break 22902 } 22903 i0 := x0.AuxInt 22904 if x0.Aux != s { 22905 break 22906 } 22907 _ = x0.Args[2] 22908 if p != x0.Args[0] { 22909 break 22910 } 22911 if idx != x0.Args[1] { 22912 break 22913 } 22914 if mem != x0.Args[2] { 22915 break 22916 } 22917 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22918 break 22919 } 22920 b = mergePoint(b, x0, x1) 22921 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22922 v.reset(OpCopy) 22923 v.AddArg(v0) 22924 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22925 v1.AuxInt = j1 22926 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22927 v2.AuxInt = 8 22928 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22929 v3.AuxInt = i0 22930 v3.Aux = s 22931 v3.AddArg(p) 22932 v3.AddArg(idx) 22933 v3.AddArg(mem) 22934 v2.AddArg(v3) 22935 v1.AddArg(v2) 22936 v0.AddArg(v1) 22937 v0.AddArg(y) 22938 return true 22939 } 22940 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22941 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22942 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22943 for { 22944 _ = v.Args[1] 22945 or := v.Args[0] 22946 if or.Op != OpAMD64ORL { 22947 break 22948 } 22949 _ = or.Args[1] 22950 y := or.Args[0] 22951 s1 := or.Args[1] 22952 if s1.Op != OpAMD64SHLLconst { 22953 break 22954 } 22955 j1 := s1.AuxInt 22956 x1 := s1.Args[0] 22957 if x1.Op != OpAMD64MOVBloadidx1 { 22958 break 22959 } 22960 i1 := x1.AuxInt 22961 s := x1.Aux 22962 _ = x1.Args[2] 22963 idx := x1.Args[0] 22964 p := x1.Args[1] 22965 mem := x1.Args[2] 22966 s0 := v.Args[1] 22967 if s0.Op != OpAMD64SHLLconst { 22968 break 22969 } 22970 j0 := s0.AuxInt 22971 x0 := s0.Args[0] 22972 if x0.Op != OpAMD64MOVBloadidx1 { 22973 break 22974 } 22975 i0 := x0.AuxInt 22976 if x0.Aux != s { 22977 break 22978 } 22979 _ = x0.Args[2] 22980 if p != x0.Args[0] { 22981 break 22982 } 22983 if idx != x0.Args[1] { 22984 break 22985 } 22986 if mem != x0.Args[2] { 22987 break 22988 } 22989 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22990 break 22991 } 22992 b = mergePoint(b, x0, x1) 22993 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22994 v.reset(OpCopy) 22995 v.AddArg(v0) 22996 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22997 v1.AuxInt = j1 22998 v2 := b.NewValue0(v.Pos, 
OpAMD64ROLWconst, typ.UInt16) 22999 v2.AuxInt = 8 23000 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23001 v3.AuxInt = i0 23002 v3.Aux = s 23003 v3.AddArg(p) 23004 v3.AddArg(idx) 23005 v3.AddArg(mem) 23006 v2.AddArg(v3) 23007 v1.AddArg(v2) 23008 v0.AddArg(v1) 23009 v0.AddArg(y) 23010 return true 23011 } 23012 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23013 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23014 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23015 for { 23016 _ = v.Args[1] 23017 or := v.Args[0] 23018 if or.Op != OpAMD64ORL { 23019 break 23020 } 23021 _ = or.Args[1] 23022 s1 := or.Args[0] 23023 if s1.Op != OpAMD64SHLLconst { 23024 break 23025 } 23026 j1 := s1.AuxInt 23027 x1 := s1.Args[0] 23028 if x1.Op != OpAMD64MOVBloadidx1 { 23029 break 23030 } 23031 i1 := x1.AuxInt 23032 s := x1.Aux 23033 _ = x1.Args[2] 23034 p := x1.Args[0] 23035 idx := x1.Args[1] 23036 mem := x1.Args[2] 23037 y := or.Args[1] 23038 s0 := v.Args[1] 23039 if s0.Op != OpAMD64SHLLconst { 23040 break 23041 } 23042 j0 := s0.AuxInt 23043 x0 := s0.Args[0] 23044 if x0.Op != OpAMD64MOVBloadidx1 { 23045 break 23046 } 23047 i0 := x0.AuxInt 23048 if x0.Aux != s { 23049 break 23050 } 23051 _ = x0.Args[2] 23052 if idx != x0.Args[0] { 23053 break 23054 } 23055 if p != x0.Args[1] { 23056 break 23057 } 23058 if mem != x0.Args[2] { 23059 break 23060 } 23061 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23062 break 23063 } 23064 b = mergePoint(b, x0, x1) 23065 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23066 v.reset(OpCopy) 23067 v.AddArg(v0) 23068 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23069 v1.AuxInt = j1 23070 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23071 v2.AuxInt = 8 23072 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23073 v3.AuxInt = i0 23074 v3.Aux = s 23075 v3.AddArg(p) 23076 v3.AddArg(idx) 23077 v3.AddArg(mem) 23078 v2.AddArg(v3) 23079 v1.AddArg(v2) 23080 v0.AddArg(v1) 23081 v0.AddArg(y) 23082 return true 23083 } 23084 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23085 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23086 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23087 for { 23088 _ = v.Args[1] 23089 or := v.Args[0] 23090 if or.Op != OpAMD64ORL { 23091 break 23092 } 23093 _ = or.Args[1] 23094 s1 := or.Args[0] 23095 if s1.Op != OpAMD64SHLLconst { 23096 break 23097 } 23098 j1 := s1.AuxInt 23099 x1 := s1.Args[0] 23100 if x1.Op != OpAMD64MOVBloadidx1 { 23101 break 23102 } 23103 i1 := x1.AuxInt 23104 s := x1.Aux 23105 _ = x1.Args[2] 23106 idx := x1.Args[0] 23107 p := x1.Args[1] 23108 mem := x1.Args[2] 23109 y := or.Args[1] 23110 s0 := 
v.Args[1] 23111 if s0.Op != OpAMD64SHLLconst { 23112 break 23113 } 23114 j0 := s0.AuxInt 23115 x0 := s0.Args[0] 23116 if x0.Op != OpAMD64MOVBloadidx1 { 23117 break 23118 } 23119 i0 := x0.AuxInt 23120 if x0.Aux != s { 23121 break 23122 } 23123 _ = x0.Args[2] 23124 if idx != x0.Args[0] { 23125 break 23126 } 23127 if p != x0.Args[1] { 23128 break 23129 } 23130 if mem != x0.Args[2] { 23131 break 23132 } 23133 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23134 break 23135 } 23136 b = mergePoint(b, x0, x1) 23137 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23138 v.reset(OpCopy) 23139 v.AddArg(v0) 23140 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23141 v1.AuxInt = j1 23142 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23143 v2.AuxInt = 8 23144 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23145 v3.AuxInt = i0 23146 v3.Aux = s 23147 v3.AddArg(p) 23148 v3.AddArg(idx) 23149 v3.AddArg(mem) 23150 v2.AddArg(v3) 23151 v1.AddArg(v2) 23152 v0.AddArg(v1) 23153 v0.AddArg(y) 23154 return true 23155 } 23156 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23157 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23158 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23159 for { 23160 _ = v.Args[1] 23161 or := v.Args[0] 23162 if or.Op != OpAMD64ORL { 23163 break 23164 } 23165 _ = or.Args[1] 23166 y := or.Args[0] 23167 s1 := or.Args[1] 23168 if s1.Op != OpAMD64SHLLconst { 23169 break 23170 } 23171 j1 := s1.AuxInt 23172 x1 := s1.Args[0] 23173 if x1.Op != OpAMD64MOVBloadidx1 { 23174 break 23175 } 23176 i1 := x1.AuxInt 23177 s := x1.Aux 23178 _ = x1.Args[2] 23179 p := x1.Args[0] 23180 idx := x1.Args[1] 23181 mem := x1.Args[2] 23182 s0 := v.Args[1] 23183 if s0.Op != OpAMD64SHLLconst { 23184 break 23185 } 23186 j0 := s0.AuxInt 23187 x0 := s0.Args[0] 23188 if x0.Op != OpAMD64MOVBloadidx1 { 23189 break 23190 } 23191 i0 := x0.AuxInt 23192 if x0.Aux != s { 23193 break 23194 } 23195 _ = x0.Args[2] 23196 if idx != x0.Args[0] { 23197 break 23198 } 23199 if p != x0.Args[1] { 23200 break 23201 } 23202 if mem != x0.Args[2] { 23203 break 23204 } 23205 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23206 break 23207 } 23208 b = mergePoint(b, x0, x1) 23209 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23210 v.reset(OpCopy) 23211 v.AddArg(v0) 23212 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23213 v1.AuxInt = j1 23214 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23215 v2.AuxInt = 8 23216 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23217 v3.AuxInt = i0 23218 v3.Aux = s 23219 v3.AddArg(p) 23220 v3.AddArg(idx) 23221 v3.AddArg(mem) 23222 v2.AddArg(v3) 23223 v1.AddArg(v2) 23224 v0.AddArg(v1) 23225 v0.AddArg(y) 23226 return true 23227 } 23228 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] 
x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23229 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23230 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23231 for { 23232 _ = v.Args[1] 23233 or := v.Args[0] 23234 if or.Op != OpAMD64ORL { 23235 break 23236 } 23237 _ = or.Args[1] 23238 y := or.Args[0] 23239 s1 := or.Args[1] 23240 if s1.Op != OpAMD64SHLLconst { 23241 break 23242 } 23243 j1 := s1.AuxInt 23244 x1 := s1.Args[0] 23245 if x1.Op != OpAMD64MOVBloadidx1 { 23246 break 23247 } 23248 i1 := x1.AuxInt 23249 s := x1.Aux 23250 _ = x1.Args[2] 23251 idx := x1.Args[0] 23252 p := x1.Args[1] 23253 mem := x1.Args[2] 23254 s0 := v.Args[1] 23255 if s0.Op != OpAMD64SHLLconst { 23256 break 23257 } 23258 j0 := s0.AuxInt 23259 x0 := s0.Args[0] 23260 if x0.Op != OpAMD64MOVBloadidx1 { 23261 break 23262 } 23263 i0 := x0.AuxInt 23264 if x0.Aux != s { 23265 break 23266 } 23267 _ = x0.Args[2] 23268 if idx != x0.Args[0] { 23269 break 23270 } 23271 if p != x0.Args[1] { 23272 break 23273 } 23274 if mem != x0.Args[2] { 23275 break 23276 } 23277 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23278 break 23279 } 23280 b = mergePoint(b, x0, x1) 23281 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23282 v.reset(OpCopy) 23283 v.AddArg(v0) 23284 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23285 v1.AuxInt = j1 23286 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23287 v2.AuxInt = 8 23288 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23289 v3.AuxInt = i0 23290 v3.Aux = s 23291 v3.AddArg(p) 23292 v3.AddArg(idx) 23293 v3.AddArg(mem) 23294 v2.AddArg(v3) 23295 v1.AddArg(v2) 23296 v0.AddArg(v1) 23297 v0.AddArg(y) 23298 return true 23299 } 23300 // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) 23301 // cond: canMergeLoad(v, l, x) && clobber(l) 23302 // result: (ORLmem x [off] {sym} ptr mem) 23303 for { 23304 _ = v.Args[1] 23305 x := v.Args[0] 23306 l := v.Args[1] 23307 if l.Op != OpAMD64MOVLload { 23308 break 23309 } 23310 off := l.AuxInt 23311 sym := l.Aux 23312 _ = l.Args[1] 23313 ptr := l.Args[0] 23314 mem := l.Args[1] 23315 if !(canMergeLoad(v, l, x) && clobber(l)) { 23316 break 23317 } 23318 v.reset(OpAMD64ORLmem) 23319 v.AuxInt = off 23320 v.Aux = sym 23321 v.AddArg(x) 23322 v.AddArg(ptr) 23323 v.AddArg(mem) 23324 return true 23325 } 23326 return false 23327 } 23328 func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { 23329 // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) 23330 // cond: canMergeLoad(v, l, x) && clobber(l) 23331 // result: (ORLmem x [off] {sym} ptr mem) 23332 for { 23333 _ = v.Args[1] 23334 l := v.Args[0] 23335 if l.Op != OpAMD64MOVLload { 23336 break 23337 } 23338 off := l.AuxInt 23339 sym := l.Aux 23340 _ = l.Args[1] 23341 ptr := l.Args[0] 23342 mem := l.Args[1] 23343 x := v.Args[1] 23344 if !(canMergeLoad(v, l, x) && clobber(l)) { 23345 break 23346 } 23347 v.reset(OpAMD64ORLmem) 23348 v.AuxInt = off 23349 v.Aux = sym 23350 v.AddArg(x) 23351 v.AddArg(ptr) 23352 v.AddArg(mem) 23353 return true 23354 } 23355 return false 23356 } 23357 func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { 23358 // match: (ORLconst [c] x) 
23359 // cond: int32(c)==0 23360 // result: x 23361 for { 23362 c := v.AuxInt 23363 x := v.Args[0] 23364 if !(int32(c) == 0) { 23365 break 23366 } 23367 v.reset(OpCopy) 23368 v.Type = x.Type 23369 v.AddArg(x) 23370 return true 23371 } 23372 // match: (ORLconst [c] _) 23373 // cond: int32(c)==-1 23374 // result: (MOVLconst [-1]) 23375 for { 23376 c := v.AuxInt 23377 if !(int32(c) == -1) { 23378 break 23379 } 23380 v.reset(OpAMD64MOVLconst) 23381 v.AuxInt = -1 23382 return true 23383 } 23384 // match: (ORLconst [c] (MOVLconst [d])) 23385 // cond: 23386 // result: (MOVLconst [c|d]) 23387 for { 23388 c := v.AuxInt 23389 v_0 := v.Args[0] 23390 if v_0.Op != OpAMD64MOVLconst { 23391 break 23392 } 23393 d := v_0.AuxInt 23394 v.reset(OpAMD64MOVLconst) 23395 v.AuxInt = c | d 23396 return true 23397 } 23398 return false 23399 } 23400 func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool { 23401 b := v.Block 23402 _ = b 23403 typ := &b.Func.Config.Types 23404 _ = typ 23405 // match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 23406 // cond: 23407 // result: ( ORL x (MOVLf2i y)) 23408 for { 23409 off := v.AuxInt 23410 sym := v.Aux 23411 _ = v.Args[2] 23412 x := v.Args[0] 23413 ptr := v.Args[1] 23414 v_2 := v.Args[2] 23415 if v_2.Op != OpAMD64MOVSSstore { 23416 break 23417 } 23418 if v_2.AuxInt != off { 23419 break 23420 } 23421 if v_2.Aux != sym { 23422 break 23423 } 23424 _ = v_2.Args[2] 23425 if ptr != v_2.Args[0] { 23426 break 23427 } 23428 y := v_2.Args[1] 23429 v.reset(OpAMD64ORL) 23430 v.AddArg(x) 23431 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 23432 v0.AddArg(y) 23433 v.AddArg(v0) 23434 return true 23435 } 23436 return false 23437 } 23438 func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { 23439 // match: (ORQ x (MOVQconst [c])) 23440 // cond: is32Bit(c) 23441 // result: (ORQconst [c] x) 23442 for { 23443 _ = v.Args[1] 23444 x := v.Args[0] 23445 v_1 := v.Args[1] 23446 if v_1.Op != OpAMD64MOVQconst { 23447 break 23448 } 23449 c := v_1.AuxInt 23450 if !(is32Bit(c)) { 23451 break 23452 } 23453 v.reset(OpAMD64ORQconst) 23454 v.AuxInt = c 23455 v.AddArg(x) 23456 return true 23457 } 23458 // match: (ORQ (MOVQconst [c]) x) 23459 // cond: is32Bit(c) 23460 // result: (ORQconst [c] x) 23461 for { 23462 _ = v.Args[1] 23463 v_0 := v.Args[0] 23464 if v_0.Op != OpAMD64MOVQconst { 23465 break 23466 } 23467 c := v_0.AuxInt 23468 x := v.Args[1] 23469 if !(is32Bit(c)) { 23470 break 23471 } 23472 v.reset(OpAMD64ORQconst) 23473 v.AuxInt = c 23474 v.AddArg(x) 23475 return true 23476 } 23477 // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) 23478 // cond: d==64-c 23479 // result: (ROLQconst x [c]) 23480 for { 23481 _ = v.Args[1] 23482 v_0 := v.Args[0] 23483 if v_0.Op != OpAMD64SHLQconst { 23484 break 23485 } 23486 c := v_0.AuxInt 23487 x := v_0.Args[0] 23488 v_1 := v.Args[1] 23489 if v_1.Op != OpAMD64SHRQconst { 23490 break 23491 } 23492 d := v_1.AuxInt 23493 if x != v_1.Args[0] { 23494 break 23495 } 23496 if !(d == 64-c) { 23497 break 23498 } 23499 v.reset(OpAMD64ROLQconst) 23500 v.AuxInt = c 23501 v.AddArg(x) 23502 return true 23503 } 23504 // match: (ORQ (SHRQconst x [d]) (SHLQconst x [c])) 23505 // cond: d==64-c 23506 // result: (ROLQconst x [c]) 23507 for { 23508 _ = v.Args[1] 23509 v_0 := v.Args[0] 23510 if v_0.Op != OpAMD64SHRQconst { 23511 break 23512 } 23513 d := v_0.AuxInt 23514 x := v_0.Args[0] 23515 v_1 := v.Args[1] 23516 if v_1.Op != OpAMD64SHLQconst { 23517 break 23518 } 23519 c := v_1.AuxInt 23520 if x != v_1.Args[0] { 23521 break 23522 } 23523 if !(d == 64-c) { 23524 
break 23525 } 23526 v.reset(OpAMD64ROLQconst) 23527 v.AuxInt = c 23528 v.AddArg(x) 23529 return true 23530 } 23531 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 23532 // cond: 23533 // result: (ROLQ x y) 23534 for { 23535 _ = v.Args[1] 23536 v_0 := v.Args[0] 23537 if v_0.Op != OpAMD64SHLQ { 23538 break 23539 } 23540 _ = v_0.Args[1] 23541 x := v_0.Args[0] 23542 y := v_0.Args[1] 23543 v_1 := v.Args[1] 23544 if v_1.Op != OpAMD64ANDQ { 23545 break 23546 } 23547 _ = v_1.Args[1] 23548 v_1_0 := v_1.Args[0] 23549 if v_1_0.Op != OpAMD64SHRQ { 23550 break 23551 } 23552 _ = v_1_0.Args[1] 23553 if x != v_1_0.Args[0] { 23554 break 23555 } 23556 v_1_0_1 := v_1_0.Args[1] 23557 if v_1_0_1.Op != OpAMD64NEGQ { 23558 break 23559 } 23560 if y != v_1_0_1.Args[0] { 23561 break 23562 } 23563 v_1_1 := v_1.Args[1] 23564 if v_1_1.Op != OpAMD64SBBQcarrymask { 23565 break 23566 } 23567 v_1_1_0 := v_1_1.Args[0] 23568 if v_1_1_0.Op != OpAMD64CMPQconst { 23569 break 23570 } 23571 if v_1_1_0.AuxInt != 64 { 23572 break 23573 } 23574 v_1_1_0_0 := v_1_1_0.Args[0] 23575 if v_1_1_0_0.Op != OpAMD64NEGQ { 23576 break 23577 } 23578 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 23579 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 23580 break 23581 } 23582 if v_1_1_0_0_0.AuxInt != -64 { 23583 break 23584 } 23585 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 23586 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 23587 break 23588 } 23589 if v_1_1_0_0_0_0.AuxInt != 63 { 23590 break 23591 } 23592 if y != v_1_1_0_0_0_0.Args[0] { 23593 break 23594 } 23595 v.reset(OpAMD64ROLQ) 23596 v.AddArg(x) 23597 v.AddArg(y) 23598 return true 23599 } 23600 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y)))) 23601 // cond: 23602 // result: (ROLQ x y) 23603 for { 23604 _ = v.Args[1] 23605 v_0 := v.Args[0] 23606 if v_0.Op != OpAMD64SHLQ { 23607 break 23608 } 23609 _ = v_0.Args[1] 23610 x := v_0.Args[0] 23611 y := v_0.Args[1] 23612 v_1 := v.Args[1] 23613 if v_1.Op != OpAMD64ANDQ { 23614 break 23615 } 23616 _ = v_1.Args[1] 23617 v_1_0 := v_1.Args[0] 23618 if v_1_0.Op != OpAMD64SBBQcarrymask { 23619 break 23620 } 23621 v_1_0_0 := v_1_0.Args[0] 23622 if v_1_0_0.Op != OpAMD64CMPQconst { 23623 break 23624 } 23625 if v_1_0_0.AuxInt != 64 { 23626 break 23627 } 23628 v_1_0_0_0 := v_1_0_0.Args[0] 23629 if v_1_0_0_0.Op != OpAMD64NEGQ { 23630 break 23631 } 23632 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 23633 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 23634 break 23635 } 23636 if v_1_0_0_0_0.AuxInt != -64 { 23637 break 23638 } 23639 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 23640 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 23641 break 23642 } 23643 if v_1_0_0_0_0_0.AuxInt != 63 { 23644 break 23645 } 23646 if y != v_1_0_0_0_0_0.Args[0] { 23647 break 23648 } 23649 v_1_1 := v_1.Args[1] 23650 if v_1_1.Op != OpAMD64SHRQ { 23651 break 23652 } 23653 _ = v_1_1.Args[1] 23654 if x != v_1_1.Args[0] { 23655 break 23656 } 23657 v_1_1_1 := v_1_1.Args[1] 23658 if v_1_1_1.Op != OpAMD64NEGQ { 23659 break 23660 } 23661 if y != v_1_1_1.Args[0] { 23662 break 23663 } 23664 v.reset(OpAMD64ROLQ) 23665 v.AddArg(x) 23666 v.AddArg(y) 23667 return true 23668 } 23669 // match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y)) 23670 // cond: 23671 // result: (ROLQ x y) 23672 for { 23673 _ = v.Args[1] 23674 v_0 := v.Args[0] 23675 if v_0.Op != OpAMD64ANDQ { 23676 break 23677 } 23678 _ = v_0.Args[1] 23679 v_0_0 := 
v_0.Args[0] 23680 if v_0_0.Op != OpAMD64SHRQ { 23681 break 23682 } 23683 _ = v_0_0.Args[1] 23684 x := v_0_0.Args[0] 23685 v_0_0_1 := v_0_0.Args[1] 23686 if v_0_0_1.Op != OpAMD64NEGQ { 23687 break 23688 } 23689 y := v_0_0_1.Args[0] 23690 v_0_1 := v_0.Args[1] 23691 if v_0_1.Op != OpAMD64SBBQcarrymask { 23692 break 23693 } 23694 v_0_1_0 := v_0_1.Args[0] 23695 if v_0_1_0.Op != OpAMD64CMPQconst { 23696 break 23697 } 23698 if v_0_1_0.AuxInt != 64 { 23699 break 23700 } 23701 v_0_1_0_0 := v_0_1_0.Args[0] 23702 if v_0_1_0_0.Op != OpAMD64NEGQ { 23703 break 23704 } 23705 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 23706 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 23707 break 23708 } 23709 if v_0_1_0_0_0.AuxInt != -64 { 23710 break 23711 } 23712 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 23713 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 23714 break 23715 } 23716 if v_0_1_0_0_0_0.AuxInt != 63 { 23717 break 23718 } 23719 if y != v_0_1_0_0_0_0.Args[0] { 23720 break 23721 } 23722 v_1 := v.Args[1] 23723 if v_1.Op != OpAMD64SHLQ { 23724 break 23725 } 23726 _ = v_1.Args[1] 23727 if x != v_1.Args[0] { 23728 break 23729 } 23730 if y != v_1.Args[1] { 23731 break 23732 } 23733 v.reset(OpAMD64ROLQ) 23734 v.AddArg(x) 23735 v.AddArg(y) 23736 return true 23737 } 23738 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y)) 23739 // cond: 23740 // result: (ROLQ x y) 23741 for { 23742 _ = v.Args[1] 23743 v_0 := v.Args[0] 23744 if v_0.Op != OpAMD64ANDQ { 23745 break 23746 } 23747 _ = v_0.Args[1] 23748 v_0_0 := v_0.Args[0] 23749 if v_0_0.Op != OpAMD64SBBQcarrymask { 23750 break 23751 } 23752 v_0_0_0 := v_0_0.Args[0] 23753 if v_0_0_0.Op != OpAMD64CMPQconst { 23754 break 23755 } 23756 if v_0_0_0.AuxInt != 64 { 23757 break 23758 } 23759 v_0_0_0_0 := v_0_0_0.Args[0] 23760 if v_0_0_0_0.Op != OpAMD64NEGQ { 23761 break 23762 } 23763 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 23764 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 23765 break 23766 } 23767 if v_0_0_0_0_0.AuxInt != -64 { 23768 break 23769 } 23770 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 23771 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 23772 break 23773 } 23774 if v_0_0_0_0_0_0.AuxInt != 63 { 23775 break 23776 } 23777 y := v_0_0_0_0_0_0.Args[0] 23778 v_0_1 := v_0.Args[1] 23779 if v_0_1.Op != OpAMD64SHRQ { 23780 break 23781 } 23782 _ = v_0_1.Args[1] 23783 x := v_0_1.Args[0] 23784 v_0_1_1 := v_0_1.Args[1] 23785 if v_0_1_1.Op != OpAMD64NEGQ { 23786 break 23787 } 23788 if y != v_0_1_1.Args[0] { 23789 break 23790 } 23791 v_1 := v.Args[1] 23792 if v_1.Op != OpAMD64SHLQ { 23793 break 23794 } 23795 _ = v_1.Args[1] 23796 if x != v_1.Args[0] { 23797 break 23798 } 23799 if y != v_1.Args[1] { 23800 break 23801 } 23802 v.reset(OpAMD64ROLQ) 23803 v.AddArg(x) 23804 v.AddArg(y) 23805 return true 23806 } 23807 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 23808 // cond: 23809 // result: (ROLQ x y) 23810 for { 23811 _ = v.Args[1] 23812 v_0 := v.Args[0] 23813 if v_0.Op != OpAMD64SHLQ { 23814 break 23815 } 23816 _ = v_0.Args[1] 23817 x := v_0.Args[0] 23818 y := v_0.Args[1] 23819 v_1 := v.Args[1] 23820 if v_1.Op != OpAMD64ANDQ { 23821 break 23822 } 23823 _ = v_1.Args[1] 23824 v_1_0 := v_1.Args[0] 23825 if v_1_0.Op != OpAMD64SHRQ { 23826 break 23827 } 23828 _ = v_1_0.Args[1] 23829 if x != v_1_0.Args[0] { 23830 break 23831 } 23832 v_1_0_1 := v_1_0.Args[1] 23833 if v_1_0_1.Op != OpAMD64NEGL { 23834 break 23835 } 23836 if y != v_1_0_1.Args[0] { 23837 break 23838 } 23839 v_1_1 := 
v_1.Args[1] 23840 if v_1_1.Op != OpAMD64SBBQcarrymask { 23841 break 23842 } 23843 v_1_1_0 := v_1_1.Args[0] 23844 if v_1_1_0.Op != OpAMD64CMPLconst { 23845 break 23846 } 23847 if v_1_1_0.AuxInt != 64 { 23848 break 23849 } 23850 v_1_1_0_0 := v_1_1_0.Args[0] 23851 if v_1_1_0_0.Op != OpAMD64NEGL { 23852 break 23853 } 23854 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 23855 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 23856 break 23857 } 23858 if v_1_1_0_0_0.AuxInt != -64 { 23859 break 23860 } 23861 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 23862 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 23863 break 23864 } 23865 if v_1_1_0_0_0_0.AuxInt != 63 { 23866 break 23867 } 23868 if y != v_1_1_0_0_0_0.Args[0] { 23869 break 23870 } 23871 v.reset(OpAMD64ROLQ) 23872 v.AddArg(x) 23873 v.AddArg(y) 23874 return true 23875 } 23876 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y)))) 23877 // cond: 23878 // result: (ROLQ x y) 23879 for { 23880 _ = v.Args[1] 23881 v_0 := v.Args[0] 23882 if v_0.Op != OpAMD64SHLQ { 23883 break 23884 } 23885 _ = v_0.Args[1] 23886 x := v_0.Args[0] 23887 y := v_0.Args[1] 23888 v_1 := v.Args[1] 23889 if v_1.Op != OpAMD64ANDQ { 23890 break 23891 } 23892 _ = v_1.Args[1] 23893 v_1_0 := v_1.Args[0] 23894 if v_1_0.Op != OpAMD64SBBQcarrymask { 23895 break 23896 } 23897 v_1_0_0 := v_1_0.Args[0] 23898 if v_1_0_0.Op != OpAMD64CMPLconst { 23899 break 23900 } 23901 if v_1_0_0.AuxInt != 64 { 23902 break 23903 } 23904 v_1_0_0_0 := v_1_0_0.Args[0] 23905 if v_1_0_0_0.Op != OpAMD64NEGL { 23906 break 23907 } 23908 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 23909 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 23910 break 23911 } 23912 if v_1_0_0_0_0.AuxInt != -64 { 23913 break 23914 } 23915 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 23916 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 23917 break 23918 } 23919 if v_1_0_0_0_0_0.AuxInt != 63 { 23920 break 23921 } 23922 if y != v_1_0_0_0_0_0.Args[0] { 23923 break 23924 } 23925 v_1_1 := v_1.Args[1] 23926 if v_1_1.Op != OpAMD64SHRQ { 23927 break 23928 } 23929 _ = v_1_1.Args[1] 23930 if x != v_1_1.Args[0] { 23931 break 23932 } 23933 v_1_1_1 := v_1_1.Args[1] 23934 if v_1_1_1.Op != OpAMD64NEGL { 23935 break 23936 } 23937 if y != v_1_1_1.Args[0] { 23938 break 23939 } 23940 v.reset(OpAMD64ROLQ) 23941 v.AddArg(x) 23942 v.AddArg(y) 23943 return true 23944 } 23945 return false 23946 } 23947 func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { 23948 // match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y)) 23949 // cond: 23950 // result: (ROLQ x y) 23951 for { 23952 _ = v.Args[1] 23953 v_0 := v.Args[0] 23954 if v_0.Op != OpAMD64ANDQ { 23955 break 23956 } 23957 _ = v_0.Args[1] 23958 v_0_0 := v_0.Args[0] 23959 if v_0_0.Op != OpAMD64SHRQ { 23960 break 23961 } 23962 _ = v_0_0.Args[1] 23963 x := v_0_0.Args[0] 23964 v_0_0_1 := v_0_0.Args[1] 23965 if v_0_0_1.Op != OpAMD64NEGL { 23966 break 23967 } 23968 y := v_0_0_1.Args[0] 23969 v_0_1 := v_0.Args[1] 23970 if v_0_1.Op != OpAMD64SBBQcarrymask { 23971 break 23972 } 23973 v_0_1_0 := v_0_1.Args[0] 23974 if v_0_1_0.Op != OpAMD64CMPLconst { 23975 break 23976 } 23977 if v_0_1_0.AuxInt != 64 { 23978 break 23979 } 23980 v_0_1_0_0 := v_0_1_0.Args[0] 23981 if v_0_1_0_0.Op != OpAMD64NEGL { 23982 break 23983 } 23984 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 23985 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 23986 break 23987 } 23988 if v_0_1_0_0_0.AuxInt != -64 { 23989 break 23990 } 23991 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 23992 if v_0_1_0_0_0_0.Op != 
OpAMD64ANDLconst { 23993 break 23994 } 23995 if v_0_1_0_0_0_0.AuxInt != 63 { 23996 break 23997 } 23998 if y != v_0_1_0_0_0_0.Args[0] { 23999 break 24000 } 24001 v_1 := v.Args[1] 24002 if v_1.Op != OpAMD64SHLQ { 24003 break 24004 } 24005 _ = v_1.Args[1] 24006 if x != v_1.Args[0] { 24007 break 24008 } 24009 if y != v_1.Args[1] { 24010 break 24011 } 24012 v.reset(OpAMD64ROLQ) 24013 v.AddArg(x) 24014 v.AddArg(y) 24015 return true 24016 } 24017 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y)) 24018 // cond: 24019 // result: (ROLQ x y) 24020 for { 24021 _ = v.Args[1] 24022 v_0 := v.Args[0] 24023 if v_0.Op != OpAMD64ANDQ { 24024 break 24025 } 24026 _ = v_0.Args[1] 24027 v_0_0 := v_0.Args[0] 24028 if v_0_0.Op != OpAMD64SBBQcarrymask { 24029 break 24030 } 24031 v_0_0_0 := v_0_0.Args[0] 24032 if v_0_0_0.Op != OpAMD64CMPLconst { 24033 break 24034 } 24035 if v_0_0_0.AuxInt != 64 { 24036 break 24037 } 24038 v_0_0_0_0 := v_0_0_0.Args[0] 24039 if v_0_0_0_0.Op != OpAMD64NEGL { 24040 break 24041 } 24042 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24043 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 24044 break 24045 } 24046 if v_0_0_0_0_0.AuxInt != -64 { 24047 break 24048 } 24049 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24050 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 24051 break 24052 } 24053 if v_0_0_0_0_0_0.AuxInt != 63 { 24054 break 24055 } 24056 y := v_0_0_0_0_0_0.Args[0] 24057 v_0_1 := v_0.Args[1] 24058 if v_0_1.Op != OpAMD64SHRQ { 24059 break 24060 } 24061 _ = v_0_1.Args[1] 24062 x := v_0_1.Args[0] 24063 v_0_1_1 := v_0_1.Args[1] 24064 if v_0_1_1.Op != OpAMD64NEGL { 24065 break 24066 } 24067 if y != v_0_1_1.Args[0] { 24068 break 24069 } 24070 v_1 := v.Args[1] 24071 if v_1.Op != OpAMD64SHLQ { 24072 break 24073 } 24074 _ = v_1.Args[1] 24075 if x != v_1.Args[0] { 24076 break 24077 } 24078 if y != v_1.Args[1] { 24079 break 24080 } 24081 v.reset(OpAMD64ROLQ) 24082 v.AddArg(x) 24083 v.AddArg(y) 24084 return true 24085 } 24086 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 24087 // cond: 24088 // result: (RORQ x y) 24089 for { 24090 _ = v.Args[1] 24091 v_0 := v.Args[0] 24092 if v_0.Op != OpAMD64SHRQ { 24093 break 24094 } 24095 _ = v_0.Args[1] 24096 x := v_0.Args[0] 24097 y := v_0.Args[1] 24098 v_1 := v.Args[1] 24099 if v_1.Op != OpAMD64ANDQ { 24100 break 24101 } 24102 _ = v_1.Args[1] 24103 v_1_0 := v_1.Args[0] 24104 if v_1_0.Op != OpAMD64SHLQ { 24105 break 24106 } 24107 _ = v_1_0.Args[1] 24108 if x != v_1_0.Args[0] { 24109 break 24110 } 24111 v_1_0_1 := v_1_0.Args[1] 24112 if v_1_0_1.Op != OpAMD64NEGQ { 24113 break 24114 } 24115 if y != v_1_0_1.Args[0] { 24116 break 24117 } 24118 v_1_1 := v_1.Args[1] 24119 if v_1_1.Op != OpAMD64SBBQcarrymask { 24120 break 24121 } 24122 v_1_1_0 := v_1_1.Args[0] 24123 if v_1_1_0.Op != OpAMD64CMPQconst { 24124 break 24125 } 24126 if v_1_1_0.AuxInt != 64 { 24127 break 24128 } 24129 v_1_1_0_0 := v_1_1_0.Args[0] 24130 if v_1_1_0_0.Op != OpAMD64NEGQ { 24131 break 24132 } 24133 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24134 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 24135 break 24136 } 24137 if v_1_1_0_0_0.AuxInt != -64 { 24138 break 24139 } 24140 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24141 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 24142 break 24143 } 24144 if v_1_1_0_0_0_0.AuxInt != 63 { 24145 break 24146 } 24147 if y != v_1_1_0_0_0_0.Args[0] { 24148 break 24149 } 24150 v.reset(OpAMD64RORQ) 24151 v.AddArg(x) 24152 v.AddArg(y) 24153 return true 24154 } 
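// The match blocks above and the commuted forms that follow all recognize the
// same tree: a variable right rotate written with two bounded shifts. The
// (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) subtree computes 64-(y&63), and
// (SBBQcarrymask (CMPQconst ... [64])) is the all-ones/zero mask that bounded
// shift lowering inserts so the x<<(64-y) arm contributes 0 when y&63 == 0.
// A minimal source-level sketch of the idiom these rules collapse into a single
// RORQ; rotr is a hypothetical example, not part of this generated file, and
// the exact tree depends on how the front end lowers the shifts:
//
//	func rotr(x uint64, y uint) uint64 {
//		// roughly (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask ...))),
//		// which the rules here and below rewrite to (RORQ x y)
//		return x>>y | x<<(64-y)
//	}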
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ { break }
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ { break }
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ { break }
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] { break }
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ { break }
		if y != v_1_0_1.Args[0] { break }
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask { break }
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst { break }
		if v_1_1_0.AuxInt != 64 { break }
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ { break }
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst { break }
		if v_1_1_0_0_0.AuxInt != -64 { break }
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { break }
		if v_1_1_0_0_0_0.AuxInt != 63 { break }
		if y != v_1_1_0_0_0_0.Args[0] { break }
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ { break }
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ { break }
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask { break }
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst { break }
		if v_1_0_0.AuxInt != 64 { break }
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ { break }
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst { break }
		if v_1_0_0_0_0.AuxInt != -64 { break }
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { break }
		if v_1_0_0_0_0_0.AuxInt != 63 { break }
		if y != v_1_0_0_0_0_0.Args[0] { break }
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ { break }
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] { break }
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ { break }
		if y != v_1_1_1.Args[0] { break }
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ { break }
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ { break }
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ { break }
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask { break }
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst { break }
		if v_0_1_0.AuxInt != 64 { break }
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ { break }
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst { break }
		if v_0_1_0_0_0.AuxInt != -64 { break }
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { break }
		if v_0_1_0_0_0_0.AuxInt != 63 { break }
		if y != v_0_1_0_0_0_0.Args[0] { break }
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ { break }
		_ = v_1.Args[1]
		if x != v_1.Args[0] { break }
		if y != v_1.Args[1] { break }
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ { break }
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask { break }
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst { break }
		if v_0_0_0.AuxInt != 64 { break }
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ { break }
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst { break }
		if v_0_0_0_0_0.AuxInt != -64 { break }
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { break }
		if v_0_0_0_0_0_0.AuxInt != 63 { break }
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ { break }
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ { break }
		if y != v_0_1_1.Args[0] { break }
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ { break }
		_ = v_1.Args[1]
		if x != v_1.Args[0] { break }
		if y != v_1.Args[1] { break }
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
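	// The same right-rotate pattern again, but with a 32-bit shift
	// count: the mask arithmetic is done with the L (32-bit) ops
	// NEGL/ADDLconst/ANDLconst/CMPLconst rather than their Q forms.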
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ { break }
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ { break }
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ { break }
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] { break }
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL { break }
		if y != v_1_0_1.Args[0] { break }
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask { break }
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst { break }
		if v_1_1_0.AuxInt != 64 { break }
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL { break }
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst { break }
		if v_1_1_0_0_0.AuxInt != -64 { break }
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { break }
		if v_1_1_0_0_0_0.AuxInt != 63 { break }
		if y != v_1_1_0_0_0_0.Args[0] { break }
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ { break }
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ { break }
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask { break }
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst { break }
		if v_1_0_0.AuxInt != 64 { break }
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL { break }
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst { break }
		if v_1_0_0_0_0.AuxInt != -64 { break }
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { break }
		if v_1_0_0_0_0_0.AuxInt != 63 { break }
		if y != v_1_0_0_0_0_0.Args[0] { break }
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ { break }
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] { break }
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL { break }
		if y != v_1_1_1.Args[0] { break }
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ { break }
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ { break }
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL { break }
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask { break }
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst { break }
		if v_0_1_0.AuxInt != 64 { break }
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL { break }
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst { break }
		if v_0_1_0_0_0.AuxInt != -64 { break }
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { break }
		if v_0_1_0_0_0_0.AuxInt != 63 { break }
		if y != v_0_1_0_0_0_0.Args[0] { break }
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ { break }
		_ = v_1.Args[1]
		if x != v_1.Args[0] { break }
		if y != v_1.Args[1] { break }
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ { break }
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask { break }
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst { break }
		if v_0_0_0.AuxInt != 64 { break }
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL { break }
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst { break }
		if v_0_0_0_0_0.AuxInt != -64 { break }
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { break }
		if v_0_0_0_0_0_0.AuxInt != 63 { break }
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ { break }
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL { break }
		if y != v_0_1_1.Args[0] { break }
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ { break }
		_ = v_1.Args[1]
		if x != v_1.Args[0] { break }
		if y != v_1.Args[1] { break }
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] { break }
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
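	// The remaining ORQ rules combine adjacent narrow loads into one
	// wide load. On little-endian memory, the byte at i0 OR'd with the
	// byte at i0+1 shifted left by 8 is exactly the 16-bit value stored
	// at i0; conceptually:
	//
	//	b0 := uint64(p[0])      // x0: (MOVBload [i0] {s} p mem)
	//	b1 := uint64(p[1]) << 8 // sh: (SHLQconst [8] (MOVBload [i0+1]))
	//	v  := b0 | b1           // rewritten to one (MOVWload [i0])
	//
	// and likewise 16->32 and 32->64 bits. mergePoint(b, x0, x1) picks a
	// block where both loads are available, and the Uses/clobber
	// conditions ensure every partial value dies with the rewrite.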
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[1]
		if p != x1.Args[0] { break }
		if mem != x1.Args[1] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[1]
		if p != x0.Args[0] { break }
		if mem != x0.Args[1] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[1]
		if p != x1.Args[0] { break }
		if mem != x1.Args[1] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[1]
		if p != x0.Args[0] { break }
		if mem != x0.Args[1] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLload { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 32 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[1]
		if p != x1.Args[0] { break }
		if mem != x1.Args[1] { break }
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 32 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLload { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[1]
		if p != x0.Args[0] { break }
		if mem != x0.Args[1] { break }
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
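	// The next rules handle a partially merged OR chain: two shifted
	// loads of adjacent addresses buried inside a larger ORQ tree, with
	// the rest of the tree kept as y. The two loads are fused into one
	// wider load shifted by the lower constant j0, and the result is
	// OR'd back with y, again in every operand ordering.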
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst { break }
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ { break }
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst { break }
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[1]
		if p != x0.Args[0] { break }
		if mem != x0.Args[1] { break }
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst { break }
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ { break }
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst { break }
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[1]
		if p != x0.Args[0] { break }
		if mem != x0.Args[1] { break }
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ { break }
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst { break }
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst { break }
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[1]
		if p != x1.Args[0] { break }
		if mem != x1.Args[1] { break }
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
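// rewriteValueAMD64_OpAMD64ORQ_30 continues with the last OR-chain
// ordering for byte loads, the same four orderings for 16-bit loads
// widened to 32 bits, and then the indexed-load (MOVBloadidx1) forms.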
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ { break }
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst { break }
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst { break }
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[1]
		if p != x1.Args[0] { break }
		if mem != x1.Args[1] { break }
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst { break }
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ { break }
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst { break }
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[1]
		if p != x0.Args[0] { break }
		if mem != x0.Args[1] { break }
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst { break }
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ { break }
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst { break }
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[1]
		if p != x0.Args[0] { break }
		if mem != x0.Args[1] { break }
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ { break }
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst { break }
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst { break }
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[1]
		if p != x1.Args[0] { break }
		if mem != x1.Args[1] { break }
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ { break }
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst { break }
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst { break }
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[1]
		if p != x1.Args[0] { break }
		if mem != x1.Args[1] { break }
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
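	// The indexed-load rules below repeat the same merges for
	// MOVBloadidx1/MOVWloadidx1/MOVLloadidx1. Because a loadidx1 takes a
	// pointer and an index that may appear in either order, every rule
	// is generated in all p/idx permutations on both loads.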
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if p != x1.Args[0] { break }
		if idx != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if p != x1.Args[0] { break }
		if idx != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if idx != x1.Args[0] { break }
		if p != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if idx != x1.Args[0] { break }
		if p != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[2]
		if p != x0.Args[0] { break }
		if idx != x0.Args[1] { break }
		if mem != x0.Args[2] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[2]
		if p != x0.Args[0] { break }
		if idx != x0.Args[1] { break }
		if mem != x0.Args[2] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[2]
		if idx != x0.Args[0] { break }
		if p != x0.Args[1] { break }
		if mem != x0.Args[2] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 8 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[2]
		if idx != x0.Args[0] { break }
		if p != x0.Args[1] { break }
		if mem != x0.Args[2] { break }
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
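	// Same merge for 16-bit indexed loads: two adjacent MOVWloadidx1
	// loads become one MOVLloadidx1, again in every p/idx and operand
	// ordering.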
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if p != x1.Args[0] { break }
		if idx != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if p != x1.Args[0] { break }
		if idx != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if idx != x1.Args[0] { break }
		if p != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if idx != x1.Args[0] { break }
		if p != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[2]
		if p != x0.Args[0] { break }
		if idx != x0.Args[1] { break }
		if mem != x0.Args[2] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[2]
		if p != x0.Args[0] { break }
		if idx != x0.Args[1] { break }
		if mem != x0.Args[2] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[2]
		if idx != x0.Args[0] { break }
		if p != x0.Args[1] { break }
		if mem != x0.Args[2] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
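// rewriteValueAMD64_OpAMD64ORQ_50 finishes the 16-bit indexed
// permutations and begins the 32-bit ones, where two adjacent
// MOVLloadidx1 loads merge into a single MOVQloadidx1.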
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 16 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 { break }
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 { break }
		i0 := x0.AuxInt
		if x0.Aux != s { break }
		_ = x0.Args[2]
		if idx != x0.Args[0] { break }
		if p != x0.Args[1] { break }
		if mem != x0.Args[2] { break }
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 32 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if p != x1.Args[0] { break }
		if idx != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 { break }
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst { break }
		if sh.AuxInt != 32 { break }
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 { break }
		i1 := x1.AuxInt
		if x1.Aux != s { break }
		_ = x1.Args[2]
		if p != x1.Args[0] { break }
		if idx != x1.Args[1] { break }
		if mem != x1.Args[2] { break }
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { break }
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
== 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26406 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26407 for { 26408 _ = v.Args[1] 26409 x0 := v.Args[0] 26410 if x0.Op != OpAMD64MOVLloadidx1 { 26411 break 26412 } 26413 i0 := x0.AuxInt 26414 s := x0.Aux 26415 _ = x0.Args[2] 26416 idx := x0.Args[0] 26417 p := x0.Args[1] 26418 mem := x0.Args[2] 26419 sh := v.Args[1] 26420 if sh.Op != OpAMD64SHLQconst { 26421 break 26422 } 26423 if sh.AuxInt != 32 { 26424 break 26425 } 26426 x1 := sh.Args[0] 26427 if x1.Op != OpAMD64MOVLloadidx1 { 26428 break 26429 } 26430 i1 := x1.AuxInt 26431 if x1.Aux != s { 26432 break 26433 } 26434 _ = x1.Args[2] 26435 if p != x1.Args[0] { 26436 break 26437 } 26438 if idx != x1.Args[1] { 26439 break 26440 } 26441 if mem != x1.Args[2] { 26442 break 26443 } 26444 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26445 break 26446 } 26447 b = mergePoint(b, x0, x1) 26448 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26449 v.reset(OpCopy) 26450 v.AddArg(v0) 26451 v0.AuxInt = i0 26452 v0.Aux = s 26453 v0.AddArg(p) 26454 v0.AddArg(idx) 26455 v0.AddArg(mem) 26456 return true 26457 } 26458 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) 26459 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26460 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26461 for { 26462 _ = v.Args[1] 26463 x0 := v.Args[0] 26464 if x0.Op != OpAMD64MOVLloadidx1 { 26465 break 26466 } 26467 i0 := x0.AuxInt 26468 s := x0.Aux 26469 _ = x0.Args[2] 26470 p := x0.Args[0] 26471 idx := x0.Args[1] 26472 mem := x0.Args[2] 26473 sh := v.Args[1] 26474 if sh.Op != OpAMD64SHLQconst { 26475 break 26476 } 26477 if sh.AuxInt != 32 { 26478 break 26479 } 26480 x1 := sh.Args[0] 26481 if x1.Op != OpAMD64MOVLloadidx1 { 26482 break 26483 } 26484 i1 := x1.AuxInt 26485 if x1.Aux != s { 26486 break 26487 } 26488 _ = x1.Args[2] 26489 if idx != x1.Args[0] { 26490 break 26491 } 26492 if p != x1.Args[1] { 26493 break 26494 } 26495 if mem != x1.Args[2] { 26496 break 26497 } 26498 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26499 break 26500 } 26501 b = mergePoint(b, x0, x1) 26502 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26503 v.reset(OpCopy) 26504 v.AddArg(v0) 26505 v0.AuxInt = i0 26506 v0.Aux = s 26507 v0.AddArg(p) 26508 v0.AddArg(idx) 26509 v0.AddArg(mem) 26510 return true 26511 } 26512 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) 26513 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26514 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26515 for { 26516 _ = v.Args[1] 26517 x0 := v.Args[0] 26518 if x0.Op != OpAMD64MOVLloadidx1 { 26519 break 26520 } 26521 i0 := x0.AuxInt 26522 s := x0.Aux 26523 _ = x0.Args[2] 26524 idx := x0.Args[0] 26525 p := x0.Args[1] 26526 mem := x0.Args[2] 26527 sh := v.Args[1] 26528 if sh.Op != OpAMD64SHLQconst { 26529 break 26530 } 26531 if sh.AuxInt != 32 { 26532 break 26533 } 26534 x1 := sh.Args[0] 26535 if x1.Op != OpAMD64MOVLloadidx1 { 26536 break 26537 } 26538 i1 
:= x1.AuxInt 26539 if x1.Aux != s { 26540 break 26541 } 26542 _ = x1.Args[2] 26543 if idx != x1.Args[0] { 26544 break 26545 } 26546 if p != x1.Args[1] { 26547 break 26548 } 26549 if mem != x1.Args[2] { 26550 break 26551 } 26552 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26553 break 26554 } 26555 b = mergePoint(b, x0, x1) 26556 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26557 v.reset(OpCopy) 26558 v.AddArg(v0) 26559 v0.AuxInt = i0 26560 v0.Aux = s 26561 v0.AddArg(p) 26562 v0.AddArg(idx) 26563 v0.AddArg(mem) 26564 return true 26565 } 26566 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) 26567 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26568 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26569 for { 26570 _ = v.Args[1] 26571 sh := v.Args[0] 26572 if sh.Op != OpAMD64SHLQconst { 26573 break 26574 } 26575 if sh.AuxInt != 32 { 26576 break 26577 } 26578 x1 := sh.Args[0] 26579 if x1.Op != OpAMD64MOVLloadidx1 { 26580 break 26581 } 26582 i1 := x1.AuxInt 26583 s := x1.Aux 26584 _ = x1.Args[2] 26585 p := x1.Args[0] 26586 idx := x1.Args[1] 26587 mem := x1.Args[2] 26588 x0 := v.Args[1] 26589 if x0.Op != OpAMD64MOVLloadidx1 { 26590 break 26591 } 26592 i0 := x0.AuxInt 26593 if x0.Aux != s { 26594 break 26595 } 26596 _ = x0.Args[2] 26597 if p != x0.Args[0] { 26598 break 26599 } 26600 if idx != x0.Args[1] { 26601 break 26602 } 26603 if mem != x0.Args[2] { 26604 break 26605 } 26606 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26607 break 26608 } 26609 b = mergePoint(b, x0, x1) 26610 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26611 v.reset(OpCopy) 26612 v.AddArg(v0) 26613 v0.AuxInt = i0 26614 v0.Aux = s 26615 v0.AddArg(p) 26616 v0.AddArg(idx) 26617 v0.AddArg(mem) 26618 return true 26619 } 26620 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) 26621 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26622 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26623 for { 26624 _ = v.Args[1] 26625 sh := v.Args[0] 26626 if sh.Op != OpAMD64SHLQconst { 26627 break 26628 } 26629 if sh.AuxInt != 32 { 26630 break 26631 } 26632 x1 := sh.Args[0] 26633 if x1.Op != OpAMD64MOVLloadidx1 { 26634 break 26635 } 26636 i1 := x1.AuxInt 26637 s := x1.Aux 26638 _ = x1.Args[2] 26639 idx := x1.Args[0] 26640 p := x1.Args[1] 26641 mem := x1.Args[2] 26642 x0 := v.Args[1] 26643 if x0.Op != OpAMD64MOVLloadidx1 { 26644 break 26645 } 26646 i0 := x0.AuxInt 26647 if x0.Aux != s { 26648 break 26649 } 26650 _ = x0.Args[2] 26651 if p != x0.Args[0] { 26652 break 26653 } 26654 if idx != x0.Args[1] { 26655 break 26656 } 26657 if mem != x0.Args[2] { 26658 break 26659 } 26660 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26661 break 26662 } 26663 b = mergePoint(b, x0, x1) 26664 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26665 v.reset(OpCopy) 26666 v.AddArg(v0) 26667 v0.AuxInt = i0 26668 v0.Aux = s 26669 v0.AddArg(p) 26670 v0.AddArg(idx) 26671 v0.AddArg(mem) 26672 return true 
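		// The MOVLloadidx1 rules around this point are all argument-order
		// permutations of a single rewrite: two 32-bit indexed loads at
		// offsets i0 and i0+4, one shifted left by 32 and ORed with the
		// other, fuse into one 64-bit load. For example:
		//   (ORQ (MOVLloadidx1 [i0] {s} p idx mem)
		//        (SHLQconst [32] (MOVLloadidx1 [i0+4] {s} p idx mem)))
		// => (MOVQloadidx1 [i0] {s} p idx mem)
		// This is sound on AMD64 because loads are little-endian: the word
		// at the lower address supplies the low-order 32 bits.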
26673 } 26674 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 26675 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26676 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26677 for { 26678 _ = v.Args[1] 26679 sh := v.Args[0] 26680 if sh.Op != OpAMD64SHLQconst { 26681 break 26682 } 26683 if sh.AuxInt != 32 { 26684 break 26685 } 26686 x1 := sh.Args[0] 26687 if x1.Op != OpAMD64MOVLloadidx1 { 26688 break 26689 } 26690 i1 := x1.AuxInt 26691 s := x1.Aux 26692 _ = x1.Args[2] 26693 p := x1.Args[0] 26694 idx := x1.Args[1] 26695 mem := x1.Args[2] 26696 x0 := v.Args[1] 26697 if x0.Op != OpAMD64MOVLloadidx1 { 26698 break 26699 } 26700 i0 := x0.AuxInt 26701 if x0.Aux != s { 26702 break 26703 } 26704 _ = x0.Args[2] 26705 if idx != x0.Args[0] { 26706 break 26707 } 26708 if p != x0.Args[1] { 26709 break 26710 } 26711 if mem != x0.Args[2] { 26712 break 26713 } 26714 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26715 break 26716 } 26717 b = mergePoint(b, x0, x1) 26718 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26719 v.reset(OpCopy) 26720 v.AddArg(v0) 26721 v0.AuxInt = i0 26722 v0.Aux = s 26723 v0.AddArg(p) 26724 v0.AddArg(idx) 26725 v0.AddArg(mem) 26726 return true 26727 } 26728 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 26729 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26730 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26731 for { 26732 _ = v.Args[1] 26733 sh := v.Args[0] 26734 if sh.Op != OpAMD64SHLQconst { 26735 break 26736 } 26737 if sh.AuxInt != 32 { 26738 break 26739 } 26740 x1 := sh.Args[0] 26741 if x1.Op != OpAMD64MOVLloadidx1 { 26742 break 26743 } 26744 i1 := x1.AuxInt 26745 s := x1.Aux 26746 _ = x1.Args[2] 26747 idx := x1.Args[0] 26748 p := x1.Args[1] 26749 mem := x1.Args[2] 26750 x0 := v.Args[1] 26751 if x0.Op != OpAMD64MOVLloadidx1 { 26752 break 26753 } 26754 i0 := x0.AuxInt 26755 if x0.Aux != s { 26756 break 26757 } 26758 _ = x0.Args[2] 26759 if idx != x0.Args[0] { 26760 break 26761 } 26762 if p != x0.Args[1] { 26763 break 26764 } 26765 if mem != x0.Args[2] { 26766 break 26767 } 26768 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26769 break 26770 } 26771 b = mergePoint(b, x0, x1) 26772 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26773 v.reset(OpCopy) 26774 v.AddArg(v0) 26775 v0.AuxInt = i0 26776 v0.Aux = s 26777 v0.AddArg(p) 26778 v0.AddArg(idx) 26779 v0.AddArg(mem) 26780 return true 26781 } 26782 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 26783 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26784 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26785 for { 26786 _ = v.Args[1] 26787 s1 := v.Args[0] 26788 if s1.Op != OpAMD64SHLQconst { 26789 break 26790 } 26791 j1 := s1.AuxInt 
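		// s1 carries the higher byte of the pair: the condition requires
		// j1 == j0+8 and i1 == i0+1, so the two byte loads are adjacent both
		// in memory and in the destination register, and the pair can be
		// replaced by a single 16-bit load shifted left by j0.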
26792 x1 := s1.Args[0] 26793 if x1.Op != OpAMD64MOVBloadidx1 { 26794 break 26795 } 26796 i1 := x1.AuxInt 26797 s := x1.Aux 26798 _ = x1.Args[2] 26799 p := x1.Args[0] 26800 idx := x1.Args[1] 26801 mem := x1.Args[2] 26802 or := v.Args[1] 26803 if or.Op != OpAMD64ORQ { 26804 break 26805 } 26806 _ = or.Args[1] 26807 s0 := or.Args[0] 26808 if s0.Op != OpAMD64SHLQconst { 26809 break 26810 } 26811 j0 := s0.AuxInt 26812 x0 := s0.Args[0] 26813 if x0.Op != OpAMD64MOVBloadidx1 { 26814 break 26815 } 26816 i0 := x0.AuxInt 26817 if x0.Aux != s { 26818 break 26819 } 26820 _ = x0.Args[2] 26821 if p != x0.Args[0] { 26822 break 26823 } 26824 if idx != x0.Args[1] { 26825 break 26826 } 26827 if mem != x0.Args[2] { 26828 break 26829 } 26830 y := or.Args[1] 26831 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26832 break 26833 } 26834 b = mergePoint(b, x0, x1) 26835 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26836 v.reset(OpCopy) 26837 v.AddArg(v0) 26838 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26839 v1.AuxInt = j0 26840 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26841 v2.AuxInt = i0 26842 v2.Aux = s 26843 v2.AddArg(p) 26844 v2.AddArg(idx) 26845 v2.AddArg(mem) 26846 v1.AddArg(v2) 26847 v0.AddArg(v1) 26848 v0.AddArg(y) 26849 return true 26850 } 26851 return false 26852 } 26853 func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool { 26854 b := v.Block 26855 _ = b 26856 typ := &b.Func.Config.Types 26857 _ = typ 26858 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 26859 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26860 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26861 for { 26862 _ = v.Args[1] 26863 s1 := v.Args[0] 26864 if s1.Op != OpAMD64SHLQconst { 26865 break 26866 } 26867 j1 := s1.AuxInt 26868 x1 := s1.Args[0] 26869 if x1.Op != OpAMD64MOVBloadidx1 { 26870 break 26871 } 26872 i1 := x1.AuxInt 26873 s := x1.Aux 26874 _ = x1.Args[2] 26875 idx := x1.Args[0] 26876 p := x1.Args[1] 26877 mem := x1.Args[2] 26878 or := v.Args[1] 26879 if or.Op != OpAMD64ORQ { 26880 break 26881 } 26882 _ = or.Args[1] 26883 s0 := or.Args[0] 26884 if s0.Op != OpAMD64SHLQconst { 26885 break 26886 } 26887 j0 := s0.AuxInt 26888 x0 := s0.Args[0] 26889 if x0.Op != OpAMD64MOVBloadidx1 { 26890 break 26891 } 26892 i0 := x0.AuxInt 26893 if x0.Aux != s { 26894 break 26895 } 26896 _ = x0.Args[2] 26897 if p != x0.Args[0] { 26898 break 26899 } 26900 if idx != x0.Args[1] { 26901 break 26902 } 26903 if mem != x0.Args[2] { 26904 break 26905 } 26906 y := or.Args[1] 26907 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26908 break 26909 } 26910 b = mergePoint(b, x0, x1) 26911 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26912 v.reset(OpCopy) 26913 v.AddArg(v0) 26914 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26915 v1.AuxInt = j0 26916 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26917 v2.AuxInt = i0 
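		// v2 is the merged 16-bit load; it feeds the new shift v1, which
		// together with the untouched term y rebuilds the ORQ at the merge point.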
26918 v2.Aux = s 26919 v2.AddArg(p) 26920 v2.AddArg(idx) 26921 v2.AddArg(mem) 26922 v1.AddArg(v2) 26923 v0.AddArg(v1) 26924 v0.AddArg(y) 26925 return true 26926 } 26927 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 26928 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26929 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26930 for { 26931 _ = v.Args[1] 26932 s1 := v.Args[0] 26933 if s1.Op != OpAMD64SHLQconst { 26934 break 26935 } 26936 j1 := s1.AuxInt 26937 x1 := s1.Args[0] 26938 if x1.Op != OpAMD64MOVBloadidx1 { 26939 break 26940 } 26941 i1 := x1.AuxInt 26942 s := x1.Aux 26943 _ = x1.Args[2] 26944 p := x1.Args[0] 26945 idx := x1.Args[1] 26946 mem := x1.Args[2] 26947 or := v.Args[1] 26948 if or.Op != OpAMD64ORQ { 26949 break 26950 } 26951 _ = or.Args[1] 26952 s0 := or.Args[0] 26953 if s0.Op != OpAMD64SHLQconst { 26954 break 26955 } 26956 j0 := s0.AuxInt 26957 x0 := s0.Args[0] 26958 if x0.Op != OpAMD64MOVBloadidx1 { 26959 break 26960 } 26961 i0 := x0.AuxInt 26962 if x0.Aux != s { 26963 break 26964 } 26965 _ = x0.Args[2] 26966 if idx != x0.Args[0] { 26967 break 26968 } 26969 if p != x0.Args[1] { 26970 break 26971 } 26972 if mem != x0.Args[2] { 26973 break 26974 } 26975 y := or.Args[1] 26976 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26977 break 26978 } 26979 b = mergePoint(b, x0, x1) 26980 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26981 v.reset(OpCopy) 26982 v.AddArg(v0) 26983 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26984 v1.AuxInt = j0 26985 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26986 v2.AuxInt = i0 26987 v2.Aux = s 26988 v2.AddArg(p) 26989 v2.AddArg(idx) 26990 v2.AddArg(mem) 26991 v1.AddArg(v2) 26992 v0.AddArg(v1) 26993 v0.AddArg(y) 26994 return true 26995 } 26996 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 26997 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26998 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26999 for { 27000 _ = v.Args[1] 27001 s1 := v.Args[0] 27002 if s1.Op != OpAMD64SHLQconst { 27003 break 27004 } 27005 j1 := s1.AuxInt 27006 x1 := s1.Args[0] 27007 if x1.Op != OpAMD64MOVBloadidx1 { 27008 break 27009 } 27010 i1 := x1.AuxInt 27011 s := x1.Aux 27012 _ = x1.Args[2] 27013 idx := x1.Args[0] 27014 p := x1.Args[1] 27015 mem := x1.Args[2] 27016 or := v.Args[1] 27017 if or.Op != OpAMD64ORQ { 27018 break 27019 } 27020 _ = or.Args[1] 27021 s0 := or.Args[0] 27022 if s0.Op != OpAMD64SHLQconst { 27023 break 27024 } 27025 j0 := s0.AuxInt 27026 x0 := s0.Args[0] 27027 if x0.Op != OpAMD64MOVBloadidx1 { 27028 break 27029 } 27030 i0 := x0.AuxInt 27031 if x0.Aux != s { 27032 break 27033 } 27034 _ = x0.Args[2] 27035 if idx != x0.Args[0] { 27036 break 27037 } 27038 if p != x0.Args[1] { 27039 
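			// Operand order does not fit this permutation; give up and let
			// the next rule try.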
break 27040 } 27041 if mem != x0.Args[2] { 27042 break 27043 } 27044 y := or.Args[1] 27045 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27046 break 27047 } 27048 b = mergePoint(b, x0, x1) 27049 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27050 v.reset(OpCopy) 27051 v.AddArg(v0) 27052 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27053 v1.AuxInt = j0 27054 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27055 v2.AuxInt = i0 27056 v2.Aux = s 27057 v2.AddArg(p) 27058 v2.AddArg(idx) 27059 v2.AddArg(mem) 27060 v1.AddArg(v2) 27061 v0.AddArg(v1) 27062 v0.AddArg(y) 27063 return true 27064 } 27065 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 27066 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27067 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27068 for { 27069 _ = v.Args[1] 27070 s1 := v.Args[0] 27071 if s1.Op != OpAMD64SHLQconst { 27072 break 27073 } 27074 j1 := s1.AuxInt 27075 x1 := s1.Args[0] 27076 if x1.Op != OpAMD64MOVBloadidx1 { 27077 break 27078 } 27079 i1 := x1.AuxInt 27080 s := x1.Aux 27081 _ = x1.Args[2] 27082 p := x1.Args[0] 27083 idx := x1.Args[1] 27084 mem := x1.Args[2] 27085 or := v.Args[1] 27086 if or.Op != OpAMD64ORQ { 27087 break 27088 } 27089 _ = or.Args[1] 27090 y := or.Args[0] 27091 s0 := or.Args[1] 27092 if s0.Op != OpAMD64SHLQconst { 27093 break 27094 } 27095 j0 := s0.AuxInt 27096 x0 := s0.Args[0] 27097 if x0.Op != OpAMD64MOVBloadidx1 { 27098 break 27099 } 27100 i0 := x0.AuxInt 27101 if x0.Aux != s { 27102 break 27103 } 27104 _ = x0.Args[2] 27105 if p != x0.Args[0] { 27106 break 27107 } 27108 if idx != x0.Args[1] { 27109 break 27110 } 27111 if mem != x0.Args[2] { 27112 break 27113 } 27114 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27115 break 27116 } 27117 b = mergePoint(b, x0, x1) 27118 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27119 v.reset(OpCopy) 27120 v.AddArg(v0) 27121 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27122 v1.AuxInt = j0 27123 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27124 v2.AuxInt = i0 27125 v2.Aux = s 27126 v2.AddArg(p) 27127 v2.AddArg(idx) 27128 v2.AddArg(mem) 27129 v1.AddArg(v2) 27130 v0.AddArg(v1) 27131 v0.AddArg(y) 27132 return true 27133 } 27134 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 27135 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27136 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27137 for { 27138 _ = v.Args[1] 27139 s1 := v.Args[0] 27140 if s1.Op != OpAMD64SHLQconst { 27141 break 27142 } 27143 j1 := s1.AuxInt 27144 x1 := s1.Args[0] 27145 if 
x1.Op != OpAMD64MOVBloadidx1 { 27146 break 27147 } 27148 i1 := x1.AuxInt 27149 s := x1.Aux 27150 _ = x1.Args[2] 27151 idx := x1.Args[0] 27152 p := x1.Args[1] 27153 mem := x1.Args[2] 27154 or := v.Args[1] 27155 if or.Op != OpAMD64ORQ { 27156 break 27157 } 27158 _ = or.Args[1] 27159 y := or.Args[0] 27160 s0 := or.Args[1] 27161 if s0.Op != OpAMD64SHLQconst { 27162 break 27163 } 27164 j0 := s0.AuxInt 27165 x0 := s0.Args[0] 27166 if x0.Op != OpAMD64MOVBloadidx1 { 27167 break 27168 } 27169 i0 := x0.AuxInt 27170 if x0.Aux != s { 27171 break 27172 } 27173 _ = x0.Args[2] 27174 if p != x0.Args[0] { 27175 break 27176 } 27177 if idx != x0.Args[1] { 27178 break 27179 } 27180 if mem != x0.Args[2] { 27181 break 27182 } 27183 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27184 break 27185 } 27186 b = mergePoint(b, x0, x1) 27187 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27188 v.reset(OpCopy) 27189 v.AddArg(v0) 27190 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27191 v1.AuxInt = j0 27192 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27193 v2.AuxInt = i0 27194 v2.Aux = s 27195 v2.AddArg(p) 27196 v2.AddArg(idx) 27197 v2.AddArg(mem) 27198 v1.AddArg(v2) 27199 v0.AddArg(v1) 27200 v0.AddArg(y) 27201 return true 27202 } 27203 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 27204 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27205 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27206 for { 27207 _ = v.Args[1] 27208 s1 := v.Args[0] 27209 if s1.Op != OpAMD64SHLQconst { 27210 break 27211 } 27212 j1 := s1.AuxInt 27213 x1 := s1.Args[0] 27214 if x1.Op != OpAMD64MOVBloadidx1 { 27215 break 27216 } 27217 i1 := x1.AuxInt 27218 s := x1.Aux 27219 _ = x1.Args[2] 27220 p := x1.Args[0] 27221 idx := x1.Args[1] 27222 mem := x1.Args[2] 27223 or := v.Args[1] 27224 if or.Op != OpAMD64ORQ { 27225 break 27226 } 27227 _ = or.Args[1] 27228 y := or.Args[0] 27229 s0 := or.Args[1] 27230 if s0.Op != OpAMD64SHLQconst { 27231 break 27232 } 27233 j0 := s0.AuxInt 27234 x0 := s0.Args[0] 27235 if x0.Op != OpAMD64MOVBloadidx1 { 27236 break 27237 } 27238 i0 := x0.AuxInt 27239 if x0.Aux != s { 27240 break 27241 } 27242 _ = x0.Args[2] 27243 if idx != x0.Args[0] { 27244 break 27245 } 27246 if p != x0.Args[1] { 27247 break 27248 } 27249 if mem != x0.Args[2] { 27250 break 27251 } 27252 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27253 break 27254 } 27255 b = mergePoint(b, x0, x1) 27256 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27257 v.reset(OpCopy) 27258 v.AddArg(v0) 27259 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27260 v1.AuxInt = j0 27261 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27262 v2.AuxInt = i0 27263 v2.Aux = s 27264 v2.AddArg(p) 27265 v2.AddArg(idx) 27266 v2.AddArg(mem) 27267 v1.AddArg(v2) 27268 v0.AddArg(v1) 27269 v0.AddArg(y) 27270 return true 27271 } 27272 // match: (ORQ s1:(SHLQconst 
[j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 27273 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27274 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27275 for { 27276 _ = v.Args[1] 27277 s1 := v.Args[0] 27278 if s1.Op != OpAMD64SHLQconst { 27279 break 27280 } 27281 j1 := s1.AuxInt 27282 x1 := s1.Args[0] 27283 if x1.Op != OpAMD64MOVBloadidx1 { 27284 break 27285 } 27286 i1 := x1.AuxInt 27287 s := x1.Aux 27288 _ = x1.Args[2] 27289 idx := x1.Args[0] 27290 p := x1.Args[1] 27291 mem := x1.Args[2] 27292 or := v.Args[1] 27293 if or.Op != OpAMD64ORQ { 27294 break 27295 } 27296 _ = or.Args[1] 27297 y := or.Args[0] 27298 s0 := or.Args[1] 27299 if s0.Op != OpAMD64SHLQconst { 27300 break 27301 } 27302 j0 := s0.AuxInt 27303 x0 := s0.Args[0] 27304 if x0.Op != OpAMD64MOVBloadidx1 { 27305 break 27306 } 27307 i0 := x0.AuxInt 27308 if x0.Aux != s { 27309 break 27310 } 27311 _ = x0.Args[2] 27312 if idx != x0.Args[0] { 27313 break 27314 } 27315 if p != x0.Args[1] { 27316 break 27317 } 27318 if mem != x0.Args[2] { 27319 break 27320 } 27321 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27322 break 27323 } 27324 b = mergePoint(b, x0, x1) 27325 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27326 v.reset(OpCopy) 27327 v.AddArg(v0) 27328 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27329 v1.AuxInt = j0 27330 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27331 v2.AuxInt = i0 27332 v2.Aux = s 27333 v2.AddArg(p) 27334 v2.AddArg(idx) 27335 v2.AddArg(mem) 27336 v1.AddArg(v2) 27337 v0.AddArg(v1) 27338 v0.AddArg(y) 27339 return true 27340 } 27341 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27342 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27343 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27344 for { 27345 _ = v.Args[1] 27346 or := v.Args[0] 27347 if or.Op != OpAMD64ORQ { 27348 break 27349 } 27350 _ = or.Args[1] 27351 s0 := or.Args[0] 27352 if s0.Op != OpAMD64SHLQconst { 27353 break 27354 } 27355 j0 := s0.AuxInt 27356 x0 := s0.Args[0] 27357 if x0.Op != OpAMD64MOVBloadidx1 { 27358 break 27359 } 27360 i0 := x0.AuxInt 27361 s := x0.Aux 27362 _ = x0.Args[2] 27363 p := x0.Args[0] 27364 idx := x0.Args[1] 27365 mem := x0.Args[2] 27366 y := or.Args[1] 27367 s1 := v.Args[1] 27368 if s1.Op != OpAMD64SHLQconst { 27369 break 27370 } 27371 j1 := s1.AuxInt 27372 x1 := s1.Args[0] 27373 if x1.Op != OpAMD64MOVBloadidx1 { 27374 break 27375 } 27376 i1 := x1.AuxInt 27377 if x1.Aux != s { 27378 break 27379 } 27380 _ = x1.Args[2] 27381 if p != x1.Args[0] { 27382 break 27383 } 27384 if idx != x1.Args[1] { 27385 break 27386 } 27387 if mem != x1.Args[2] { 27388 break 27389 } 27390 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 
&& or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27391 break 27392 } 27393 b = mergePoint(b, x0, x1) 27394 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27395 v.reset(OpCopy) 27396 v.AddArg(v0) 27397 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27398 v1.AuxInt = j0 27399 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27400 v2.AuxInt = i0 27401 v2.Aux = s 27402 v2.AddArg(p) 27403 v2.AddArg(idx) 27404 v2.AddArg(mem) 27405 v1.AddArg(v2) 27406 v0.AddArg(v1) 27407 v0.AddArg(y) 27408 return true 27409 } 27410 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27411 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27412 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27413 for { 27414 _ = v.Args[1] 27415 or := v.Args[0] 27416 if or.Op != OpAMD64ORQ { 27417 break 27418 } 27419 _ = or.Args[1] 27420 s0 := or.Args[0] 27421 if s0.Op != OpAMD64SHLQconst { 27422 break 27423 } 27424 j0 := s0.AuxInt 27425 x0 := s0.Args[0] 27426 if x0.Op != OpAMD64MOVBloadidx1 { 27427 break 27428 } 27429 i0 := x0.AuxInt 27430 s := x0.Aux 27431 _ = x0.Args[2] 27432 idx := x0.Args[0] 27433 p := x0.Args[1] 27434 mem := x0.Args[2] 27435 y := or.Args[1] 27436 s1 := v.Args[1] 27437 if s1.Op != OpAMD64SHLQconst { 27438 break 27439 } 27440 j1 := s1.AuxInt 27441 x1 := s1.Args[0] 27442 if x1.Op != OpAMD64MOVBloadidx1 { 27443 break 27444 } 27445 i1 := x1.AuxInt 27446 if x1.Aux != s { 27447 break 27448 } 27449 _ = x1.Args[2] 27450 if p != x1.Args[0] { 27451 break 27452 } 27453 if idx != x1.Args[1] { 27454 break 27455 } 27456 if mem != x1.Args[2] { 27457 break 27458 } 27459 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27460 break 27461 } 27462 b = mergePoint(b, x0, x1) 27463 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27464 v.reset(OpCopy) 27465 v.AddArg(v0) 27466 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27467 v1.AuxInt = j0 27468 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27469 v2.AuxInt = i0 27470 v2.Aux = s 27471 v2.AddArg(p) 27472 v2.AddArg(idx) 27473 v2.AddArg(mem) 27474 v1.AddArg(v2) 27475 v0.AddArg(v1) 27476 v0.AddArg(y) 27477 return true 27478 } 27479 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27480 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27481 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27482 for { 27483 _ = v.Args[1] 27484 or := v.Args[0] 27485 if or.Op != OpAMD64ORQ { 27486 break 27487 } 27488 _ = or.Args[1] 27489 y := or.Args[0] 27490 s0 := or.Args[1] 27491 if s0.Op != OpAMD64SHLQconst { 27492 break 27493 } 27494 j0 := s0.AuxInt 27495 x0 := s0.Args[0] 27496 if x0.Op != OpAMD64MOVBloadidx1 { 27497 break 27498 } 27499 i0 := x0.AuxInt 27500 s 
:= x0.Aux 27501 _ = x0.Args[2] 27502 p := x0.Args[0] 27503 idx := x0.Args[1] 27504 mem := x0.Args[2] 27505 s1 := v.Args[1] 27506 if s1.Op != OpAMD64SHLQconst { 27507 break 27508 } 27509 j1 := s1.AuxInt 27510 x1 := s1.Args[0] 27511 if x1.Op != OpAMD64MOVBloadidx1 { 27512 break 27513 } 27514 i1 := x1.AuxInt 27515 if x1.Aux != s { 27516 break 27517 } 27518 _ = x1.Args[2] 27519 if p != x1.Args[0] { 27520 break 27521 } 27522 if idx != x1.Args[1] { 27523 break 27524 } 27525 if mem != x1.Args[2] { 27526 break 27527 } 27528 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27529 break 27530 } 27531 b = mergePoint(b, x0, x1) 27532 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27533 v.reset(OpCopy) 27534 v.AddArg(v0) 27535 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27536 v1.AuxInt = j0 27537 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27538 v2.AuxInt = i0 27539 v2.Aux = s 27540 v2.AddArg(p) 27541 v2.AddArg(idx) 27542 v2.AddArg(mem) 27543 v1.AddArg(v2) 27544 v0.AddArg(v1) 27545 v0.AddArg(y) 27546 return true 27547 } 27548 return false 27549 } 27550 func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool { 27551 b := v.Block 27552 _ = b 27553 typ := &b.Func.Config.Types 27554 _ = typ 27555 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27556 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27557 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27558 for { 27559 _ = v.Args[1] 27560 or := v.Args[0] 27561 if or.Op != OpAMD64ORQ { 27562 break 27563 } 27564 _ = or.Args[1] 27565 y := or.Args[0] 27566 s0 := or.Args[1] 27567 if s0.Op != OpAMD64SHLQconst { 27568 break 27569 } 27570 j0 := s0.AuxInt 27571 x0 := s0.Args[0] 27572 if x0.Op != OpAMD64MOVBloadidx1 { 27573 break 27574 } 27575 i0 := x0.AuxInt 27576 s := x0.Aux 27577 _ = x0.Args[2] 27578 idx := x0.Args[0] 27579 p := x0.Args[1] 27580 mem := x0.Args[2] 27581 s1 := v.Args[1] 27582 if s1.Op != OpAMD64SHLQconst { 27583 break 27584 } 27585 j1 := s1.AuxInt 27586 x1 := s1.Args[0] 27587 if x1.Op != OpAMD64MOVBloadidx1 { 27588 break 27589 } 27590 i1 := x1.AuxInt 27591 if x1.Aux != s { 27592 break 27593 } 27594 _ = x1.Args[2] 27595 if p != x1.Args[0] { 27596 break 27597 } 27598 if idx != x1.Args[1] { 27599 break 27600 } 27601 if mem != x1.Args[2] { 27602 break 27603 } 27604 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27605 break 27606 } 27607 b = mergePoint(b, x0, x1) 27608 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27609 v.reset(OpCopy) 27610 v.AddArg(v0) 27611 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27612 v1.AuxInt = j0 27613 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27614 v2.AuxInt = i0 27615 v2.Aux = s 27616 v2.AddArg(p) 27617 v2.AddArg(idx) 27618 v2.AddArg(mem) 27619 v1.AddArg(v2) 27620 v0.AddArg(v1) 27621 v0.AddArg(y) 27622 return true 27623 } 27624 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] 
{s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27625 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27626 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27627 for { 27628 _ = v.Args[1] 27629 or := v.Args[0] 27630 if or.Op != OpAMD64ORQ { 27631 break 27632 } 27633 _ = or.Args[1] 27634 s0 := or.Args[0] 27635 if s0.Op != OpAMD64SHLQconst { 27636 break 27637 } 27638 j0 := s0.AuxInt 27639 x0 := s0.Args[0] 27640 if x0.Op != OpAMD64MOVBloadidx1 { 27641 break 27642 } 27643 i0 := x0.AuxInt 27644 s := x0.Aux 27645 _ = x0.Args[2] 27646 p := x0.Args[0] 27647 idx := x0.Args[1] 27648 mem := x0.Args[2] 27649 y := or.Args[1] 27650 s1 := v.Args[1] 27651 if s1.Op != OpAMD64SHLQconst { 27652 break 27653 } 27654 j1 := s1.AuxInt 27655 x1 := s1.Args[0] 27656 if x1.Op != OpAMD64MOVBloadidx1 { 27657 break 27658 } 27659 i1 := x1.AuxInt 27660 if x1.Aux != s { 27661 break 27662 } 27663 _ = x1.Args[2] 27664 if idx != x1.Args[0] { 27665 break 27666 } 27667 if p != x1.Args[1] { 27668 break 27669 } 27670 if mem != x1.Args[2] { 27671 break 27672 } 27673 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27674 break 27675 } 27676 b = mergePoint(b, x0, x1) 27677 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27678 v.reset(OpCopy) 27679 v.AddArg(v0) 27680 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27681 v1.AuxInt = j0 27682 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27683 v2.AuxInt = i0 27684 v2.Aux = s 27685 v2.AddArg(p) 27686 v2.AddArg(idx) 27687 v2.AddArg(mem) 27688 v1.AddArg(v2) 27689 v0.AddArg(v1) 27690 v0.AddArg(y) 27691 return true 27692 } 27693 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27694 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27695 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27696 for { 27697 _ = v.Args[1] 27698 or := v.Args[0] 27699 if or.Op != OpAMD64ORQ { 27700 break 27701 } 27702 _ = or.Args[1] 27703 s0 := or.Args[0] 27704 if s0.Op != OpAMD64SHLQconst { 27705 break 27706 } 27707 j0 := s0.AuxInt 27708 x0 := s0.Args[0] 27709 if x0.Op != OpAMD64MOVBloadidx1 { 27710 break 27711 } 27712 i0 := x0.AuxInt 27713 s := x0.Aux 27714 _ = x0.Args[2] 27715 idx := x0.Args[0] 27716 p := x0.Args[1] 27717 mem := x0.Args[2] 27718 y := or.Args[1] 27719 s1 := v.Args[1] 27720 if s1.Op != OpAMD64SHLQconst { 27721 break 27722 } 27723 j1 := s1.AuxInt 27724 x1 := s1.Args[0] 27725 if x1.Op != OpAMD64MOVBloadidx1 { 27726 break 27727 } 27728 i1 := x1.AuxInt 27729 if x1.Aux != s { 27730 break 27731 } 27732 _ = x1.Args[2] 27733 if idx != x1.Args[0] { 27734 break 27735 } 27736 if p != x1.Args[1] { 27737 break 27738 } 27739 if mem != x1.Args[2] { 27740 break 27741 } 27742 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, 
x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27743 break 27744 } 27745 b = mergePoint(b, x0, x1) 27746 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27747 v.reset(OpCopy) 27748 v.AddArg(v0) 27749 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27750 v1.AuxInt = j0 27751 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27752 v2.AuxInt = i0 27753 v2.Aux = s 27754 v2.AddArg(p) 27755 v2.AddArg(idx) 27756 v2.AddArg(mem) 27757 v1.AddArg(v2) 27758 v0.AddArg(v1) 27759 v0.AddArg(y) 27760 return true 27761 } 27762 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27763 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27764 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27765 for { 27766 _ = v.Args[1] 27767 or := v.Args[0] 27768 if or.Op != OpAMD64ORQ { 27769 break 27770 } 27771 _ = or.Args[1] 27772 y := or.Args[0] 27773 s0 := or.Args[1] 27774 if s0.Op != OpAMD64SHLQconst { 27775 break 27776 } 27777 j0 := s0.AuxInt 27778 x0 := s0.Args[0] 27779 if x0.Op != OpAMD64MOVBloadidx1 { 27780 break 27781 } 27782 i0 := x0.AuxInt 27783 s := x0.Aux 27784 _ = x0.Args[2] 27785 p := x0.Args[0] 27786 idx := x0.Args[1] 27787 mem := x0.Args[2] 27788 s1 := v.Args[1] 27789 if s1.Op != OpAMD64SHLQconst { 27790 break 27791 } 27792 j1 := s1.AuxInt 27793 x1 := s1.Args[0] 27794 if x1.Op != OpAMD64MOVBloadidx1 { 27795 break 27796 } 27797 i1 := x1.AuxInt 27798 if x1.Aux != s { 27799 break 27800 } 27801 _ = x1.Args[2] 27802 if idx != x1.Args[0] { 27803 break 27804 } 27805 if p != x1.Args[1] { 27806 break 27807 } 27808 if mem != x1.Args[2] { 27809 break 27810 } 27811 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27812 break 27813 } 27814 b = mergePoint(b, x0, x1) 27815 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27816 v.reset(OpCopy) 27817 v.AddArg(v0) 27818 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27819 v1.AuxInt = j0 27820 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27821 v2.AuxInt = i0 27822 v2.Aux = s 27823 v2.AddArg(p) 27824 v2.AddArg(idx) 27825 v2.AddArg(mem) 27826 v1.AddArg(v2) 27827 v0.AddArg(v1) 27828 v0.AddArg(y) 27829 return true 27830 } 27831 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27832 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27833 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27834 for { 27835 _ = v.Args[1] 27836 or := v.Args[0] 27837 if or.Op != OpAMD64ORQ { 27838 break 27839 } 27840 _ = or.Args[1] 27841 y := or.Args[0] 27842 s0 := or.Args[1] 27843 if s0.Op != OpAMD64SHLQconst { 27844 break 27845 } 27846 j0 := s0.AuxInt 27847 x0 := s0.Args[0] 27848 if x0.Op != OpAMD64MOVBloadidx1 { 27849 break 27850 } 27851 i0 := x0.AuxInt 27852 s := x0.Aux 27853 _ = x0.Args[2] 27854 
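		// This variant binds the address parts of the inner MOVBloadidx1 in
		// (idx, p) order; since the indexed address is simply p+idx, the
		// result may still emit them as (p, idx), which is why the generator
		// enumerates both orders as separate rules.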
idx := x0.Args[0] 27855 p := x0.Args[1] 27856 mem := x0.Args[2] 27857 s1 := v.Args[1] 27858 if s1.Op != OpAMD64SHLQconst { 27859 break 27860 } 27861 j1 := s1.AuxInt 27862 x1 := s1.Args[0] 27863 if x1.Op != OpAMD64MOVBloadidx1 { 27864 break 27865 } 27866 i1 := x1.AuxInt 27867 if x1.Aux != s { 27868 break 27869 } 27870 _ = x1.Args[2] 27871 if idx != x1.Args[0] { 27872 break 27873 } 27874 if p != x1.Args[1] { 27875 break 27876 } 27877 if mem != x1.Args[2] { 27878 break 27879 } 27880 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27881 break 27882 } 27883 b = mergePoint(b, x0, x1) 27884 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27885 v.reset(OpCopy) 27886 v.AddArg(v0) 27887 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27888 v1.AuxInt = j0 27889 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27890 v2.AuxInt = i0 27891 v2.Aux = s 27892 v2.AddArg(p) 27893 v2.AddArg(idx) 27894 v2.AddArg(mem) 27895 v1.AddArg(v2) 27896 v0.AddArg(v1) 27897 v0.AddArg(y) 27898 return true 27899 } 27900 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 27901 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27902 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27903 for { 27904 _ = v.Args[1] 27905 s1 := v.Args[0] 27906 if s1.Op != OpAMD64SHLQconst { 27907 break 27908 } 27909 j1 := s1.AuxInt 27910 x1 := s1.Args[0] 27911 if x1.Op != OpAMD64MOVWloadidx1 { 27912 break 27913 } 27914 i1 := x1.AuxInt 27915 s := x1.Aux 27916 _ = x1.Args[2] 27917 p := x1.Args[0] 27918 idx := x1.Args[1] 27919 mem := x1.Args[2] 27920 or := v.Args[1] 27921 if or.Op != OpAMD64ORQ { 27922 break 27923 } 27924 _ = or.Args[1] 27925 s0 := or.Args[0] 27926 if s0.Op != OpAMD64SHLQconst { 27927 break 27928 } 27929 j0 := s0.AuxInt 27930 x0 := s0.Args[0] 27931 if x0.Op != OpAMD64MOVWloadidx1 { 27932 break 27933 } 27934 i0 := x0.AuxInt 27935 if x0.Aux != s { 27936 break 27937 } 27938 _ = x0.Args[2] 27939 if p != x0.Args[0] { 27940 break 27941 } 27942 if idx != x0.Args[1] { 27943 break 27944 } 27945 if mem != x0.Args[2] { 27946 break 27947 } 27948 y := or.Args[1] 27949 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27950 break 27951 } 27952 b = mergePoint(b, x0, x1) 27953 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27954 v.reset(OpCopy) 27955 v.AddArg(v0) 27956 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27957 v1.AuxInt = j0 27958 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27959 v2.AuxInt = i0 27960 v2.Aux = s 27961 v2.AddArg(p) 27962 v2.AddArg(idx) 27963 v2.AddArg(mem) 27964 v1.AddArg(v2) 27965 v0.AddArg(v1) 27966 v0.AddArg(y) 27967 return true 27968 } 27969 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 27970 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && 
or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27971 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27972 for { 27973 _ = v.Args[1] 27974 s1 := v.Args[0] 27975 if s1.Op != OpAMD64SHLQconst { 27976 break 27977 } 27978 j1 := s1.AuxInt 27979 x1 := s1.Args[0] 27980 if x1.Op != OpAMD64MOVWloadidx1 { 27981 break 27982 } 27983 i1 := x1.AuxInt 27984 s := x1.Aux 27985 _ = x1.Args[2] 27986 idx := x1.Args[0] 27987 p := x1.Args[1] 27988 mem := x1.Args[2] 27989 or := v.Args[1] 27990 if or.Op != OpAMD64ORQ { 27991 break 27992 } 27993 _ = or.Args[1] 27994 s0 := or.Args[0] 27995 if s0.Op != OpAMD64SHLQconst { 27996 break 27997 } 27998 j0 := s0.AuxInt 27999 x0 := s0.Args[0] 28000 if x0.Op != OpAMD64MOVWloadidx1 { 28001 break 28002 } 28003 i0 := x0.AuxInt 28004 if x0.Aux != s { 28005 break 28006 } 28007 _ = x0.Args[2] 28008 if p != x0.Args[0] { 28009 break 28010 } 28011 if idx != x0.Args[1] { 28012 break 28013 } 28014 if mem != x0.Args[2] { 28015 break 28016 } 28017 y := or.Args[1] 28018 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28019 break 28020 } 28021 b = mergePoint(b, x0, x1) 28022 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28023 v.reset(OpCopy) 28024 v.AddArg(v0) 28025 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28026 v1.AuxInt = j0 28027 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28028 v2.AuxInt = i0 28029 v2.Aux = s 28030 v2.AddArg(p) 28031 v2.AddArg(idx) 28032 v2.AddArg(mem) 28033 v1.AddArg(v2) 28034 v0.AddArg(v1) 28035 v0.AddArg(y) 28036 return true 28037 } 28038 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 28039 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28040 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28041 for { 28042 _ = v.Args[1] 28043 s1 := v.Args[0] 28044 if s1.Op != OpAMD64SHLQconst { 28045 break 28046 } 28047 j1 := s1.AuxInt 28048 x1 := s1.Args[0] 28049 if x1.Op != OpAMD64MOVWloadidx1 { 28050 break 28051 } 28052 i1 := x1.AuxInt 28053 s := x1.Aux 28054 _ = x1.Args[2] 28055 p := x1.Args[0] 28056 idx := x1.Args[1] 28057 mem := x1.Args[2] 28058 or := v.Args[1] 28059 if or.Op != OpAMD64ORQ { 28060 break 28061 } 28062 _ = or.Args[1] 28063 s0 := or.Args[0] 28064 if s0.Op != OpAMD64SHLQconst { 28065 break 28066 } 28067 j0 := s0.AuxInt 28068 x0 := s0.Args[0] 28069 if x0.Op != OpAMD64MOVWloadidx1 { 28070 break 28071 } 28072 i0 := x0.AuxInt 28073 if x0.Aux != s { 28074 break 28075 } 28076 _ = x0.Args[2] 28077 if idx != x0.Args[0] { 28078 break 28079 } 28080 if p != x0.Args[1] { 28081 break 28082 } 28083 if mem != x0.Args[2] { 28084 break 28085 } 28086 y := or.Args[1] 28087 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28088 break 28089 } 28090 b = mergePoint(b, x0, x1) 28091 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 
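		// b now refers to mergePoint(b, x0, x1), a block in which both loads
		// are available, so the fused load is materialized there rather than
		// in v's original block; v itself is reset to a copy of the new value.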
28092 v.reset(OpCopy) 28093 v.AddArg(v0) 28094 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28095 v1.AuxInt = j0 28096 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28097 v2.AuxInt = i0 28098 v2.Aux = s 28099 v2.AddArg(p) 28100 v2.AddArg(idx) 28101 v2.AddArg(mem) 28102 v1.AddArg(v2) 28103 v0.AddArg(v1) 28104 v0.AddArg(y) 28105 return true 28106 } 28107 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 28108 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28109 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28110 for { 28111 _ = v.Args[1] 28112 s1 := v.Args[0] 28113 if s1.Op != OpAMD64SHLQconst { 28114 break 28115 } 28116 j1 := s1.AuxInt 28117 x1 := s1.Args[0] 28118 if x1.Op != OpAMD64MOVWloadidx1 { 28119 break 28120 } 28121 i1 := x1.AuxInt 28122 s := x1.Aux 28123 _ = x1.Args[2] 28124 idx := x1.Args[0] 28125 p := x1.Args[1] 28126 mem := x1.Args[2] 28127 or := v.Args[1] 28128 if or.Op != OpAMD64ORQ { 28129 break 28130 } 28131 _ = or.Args[1] 28132 s0 := or.Args[0] 28133 if s0.Op != OpAMD64SHLQconst { 28134 break 28135 } 28136 j0 := s0.AuxInt 28137 x0 := s0.Args[0] 28138 if x0.Op != OpAMD64MOVWloadidx1 { 28139 break 28140 } 28141 i0 := x0.AuxInt 28142 if x0.Aux != s { 28143 break 28144 } 28145 _ = x0.Args[2] 28146 if idx != x0.Args[0] { 28147 break 28148 } 28149 if p != x0.Args[1] { 28150 break 28151 } 28152 if mem != x0.Args[2] { 28153 break 28154 } 28155 y := or.Args[1] 28156 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28157 break 28158 } 28159 b = mergePoint(b, x0, x1) 28160 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28161 v.reset(OpCopy) 28162 v.AddArg(v0) 28163 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28164 v1.AuxInt = j0 28165 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28166 v2.AuxInt = i0 28167 v2.Aux = s 28168 v2.AddArg(p) 28169 v2.AddArg(idx) 28170 v2.AddArg(mem) 28171 v1.AddArg(v2) 28172 v0.AddArg(v1) 28173 v0.AddArg(y) 28174 return true 28175 } 28176 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28177 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28178 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28179 for { 28180 _ = v.Args[1] 28181 s1 := v.Args[0] 28182 if s1.Op != OpAMD64SHLQconst { 28183 break 28184 } 28185 j1 := s1.AuxInt 28186 x1 := s1.Args[0] 28187 if x1.Op != OpAMD64MOVWloadidx1 { 28188 break 28189 } 28190 i1 := x1.AuxInt 28191 s := x1.Aux 28192 _ = x1.Args[2] 28193 p := x1.Args[0] 28194 idx := x1.Args[1] 28195 mem := x1.Args[2] 28196 or := v.Args[1] 28197 if or.Op != OpAMD64ORQ { 28198 break 28199 } 28200 _ = or.Args[1] 28201 y := or.Args[0] 28202 s0 := or.Args[1] 28203 if s0.Op != OpAMD64SHLQconst { 28204 break 28205 } 28206 j0 := s0.AuxInt 28207 x0 := s0.Args[0] 
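		// x0 is the 16-bit load feeding the lower shift s0; its offset i0
		// anchors the merged 32-bit load built below.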
28208 if x0.Op != OpAMD64MOVWloadidx1 { 28209 break 28210 } 28211 i0 := x0.AuxInt 28212 if x0.Aux != s { 28213 break 28214 } 28215 _ = x0.Args[2] 28216 if p != x0.Args[0] { 28217 break 28218 } 28219 if idx != x0.Args[1] { 28220 break 28221 } 28222 if mem != x0.Args[2] { 28223 break 28224 } 28225 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28226 break 28227 } 28228 b = mergePoint(b, x0, x1) 28229 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28230 v.reset(OpCopy) 28231 v.AddArg(v0) 28232 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28233 v1.AuxInt = j0 28234 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28235 v2.AuxInt = i0 28236 v2.Aux = s 28237 v2.AddArg(p) 28238 v2.AddArg(idx) 28239 v2.AddArg(mem) 28240 v1.AddArg(v2) 28241 v0.AddArg(v1) 28242 v0.AddArg(y) 28243 return true 28244 } 28245 return false 28246 } 28247 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { 28248 b := v.Block 28249 _ = b 28250 typ := &b.Func.Config.Types 28251 _ = typ 28252 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28253 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28254 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28255 for { 28256 _ = v.Args[1] 28257 s1 := v.Args[0] 28258 if s1.Op != OpAMD64SHLQconst { 28259 break 28260 } 28261 j1 := s1.AuxInt 28262 x1 := s1.Args[0] 28263 if x1.Op != OpAMD64MOVWloadidx1 { 28264 break 28265 } 28266 i1 := x1.AuxInt 28267 s := x1.Aux 28268 _ = x1.Args[2] 28269 idx := x1.Args[0] 28270 p := x1.Args[1] 28271 mem := x1.Args[2] 28272 or := v.Args[1] 28273 if or.Op != OpAMD64ORQ { 28274 break 28275 } 28276 _ = or.Args[1] 28277 y := or.Args[0] 28278 s0 := or.Args[1] 28279 if s0.Op != OpAMD64SHLQconst { 28280 break 28281 } 28282 j0 := s0.AuxInt 28283 x0 := s0.Args[0] 28284 if x0.Op != OpAMD64MOVWloadidx1 { 28285 break 28286 } 28287 i0 := x0.AuxInt 28288 if x0.Aux != s { 28289 break 28290 } 28291 _ = x0.Args[2] 28292 if p != x0.Args[0] { 28293 break 28294 } 28295 if idx != x0.Args[1] { 28296 break 28297 } 28298 if mem != x0.Args[2] { 28299 break 28300 } 28301 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28302 break 28303 } 28304 b = mergePoint(b, x0, x1) 28305 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28306 v.reset(OpCopy) 28307 v.AddArg(v0) 28308 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28309 v1.AuxInt = j0 28310 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28311 v2.AuxInt = i0 28312 v2.Aux = s 28313 v2.AddArg(p) 28314 v2.AddArg(idx) 28315 v2.AddArg(mem) 28316 v1.AddArg(v2) 28317 v0.AddArg(v1) 28318 v0.AddArg(y) 28319 return true 28320 } 28321 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28322 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28323 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28324 for { 28325 _ = v.Args[1] 28326 s1 := v.Args[0] 28327 if s1.Op != OpAMD64SHLQconst { 28328 break 28329 } 28330 j1 := s1.AuxInt 28331 x1 := s1.Args[0] 28332 if x1.Op != OpAMD64MOVWloadidx1 { 28333 break 28334 } 28335 i1 := x1.AuxInt 28336 s := x1.Aux 28337 _ = x1.Args[2] 28338 p := x1.Args[0] 28339 idx := x1.Args[1] 28340 mem := x1.Args[2] 28341 or := v.Args[1] 28342 if or.Op != OpAMD64ORQ { 28343 break 28344 } 28345 _ = or.Args[1] 28346 y := or.Args[0] 28347 s0 := or.Args[1] 28348 if s0.Op != OpAMD64SHLQconst { 28349 break 28350 } 28351 j0 := s0.AuxInt 28352 x0 := s0.Args[0] 28353 if x0.Op != OpAMD64MOVWloadidx1 { 28354 break 28355 } 28356 i0 := x0.AuxInt 28357 if x0.Aux != s { 28358 break 28359 } 28360 _ = x0.Args[2] 28361 if idx != x0.Args[0] { 28362 break 28363 } 28364 if p != x0.Args[1] { 28365 break 28366 } 28367 if mem != x0.Args[2] { 28368 break 28369 } 28370 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28371 break 28372 } 28373 b = mergePoint(b, x0, x1) 28374 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28375 v.reset(OpCopy) 28376 v.AddArg(v0) 28377 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28378 v1.AuxInt = j0 28379 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28380 v2.AuxInt = i0 28381 v2.Aux = s 28382 v2.AddArg(p) 28383 v2.AddArg(idx) 28384 v2.AddArg(mem) 28385 v1.AddArg(v2) 28386 v0.AddArg(v1) 28387 v0.AddArg(y) 28388 return true 28389 } 28390 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28391 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28392 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28393 for { 28394 _ = v.Args[1] 28395 s1 := v.Args[0] 28396 if s1.Op != OpAMD64SHLQconst { 28397 break 28398 } 28399 j1 := s1.AuxInt 28400 x1 := s1.Args[0] 28401 if x1.Op != OpAMD64MOVWloadidx1 { 28402 break 28403 } 28404 i1 := x1.AuxInt 28405 s := x1.Aux 28406 _ = x1.Args[2] 28407 idx := x1.Args[0] 28408 p := x1.Args[1] 28409 mem := x1.Args[2] 28410 or := v.Args[1] 28411 if or.Op != OpAMD64ORQ { 28412 break 28413 } 28414 _ = or.Args[1] 28415 y := or.Args[0] 28416 s0 := or.Args[1] 28417 if s0.Op != OpAMD64SHLQconst { 28418 break 28419 } 28420 j0 := s0.AuxInt 28421 x0 := s0.Args[0] 28422 if x0.Op != OpAMD64MOVWloadidx1 { 28423 break 28424 } 28425 i0 := x0.AuxInt 28426 if x0.Aux != s { 28427 break 28428 } 28429 _ = x0.Args[2] 28430 if idx != x0.Args[0] { 28431 break 28432 } 28433 if p != x0.Args[1] { 28434 break 28435 } 28436 if mem != x0.Args[2] { 28437 break 28438 } 28439 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28440 break 28441 } 28442 b = mergePoint(b, x0, x1) 28443 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28444 
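		// The rewrite keeps j0 as the shift amount of the merged 32-bit load,
		// so the combined word lands in exactly the bits the two original
		// 16-bit terms occupied.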
v.reset(OpCopy) 28445 v.AddArg(v0) 28446 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28447 v1.AuxInt = j0 28448 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28449 v2.AuxInt = i0 28450 v2.Aux = s 28451 v2.AddArg(p) 28452 v2.AddArg(idx) 28453 v2.AddArg(mem) 28454 v1.AddArg(v2) 28455 v0.AddArg(v1) 28456 v0.AddArg(y) 28457 return true 28458 } 28459 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28460 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28461 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28462 for { 28463 _ = v.Args[1] 28464 or := v.Args[0] 28465 if or.Op != OpAMD64ORQ { 28466 break 28467 } 28468 _ = or.Args[1] 28469 s0 := or.Args[0] 28470 if s0.Op != OpAMD64SHLQconst { 28471 break 28472 } 28473 j0 := s0.AuxInt 28474 x0 := s0.Args[0] 28475 if x0.Op != OpAMD64MOVWloadidx1 { 28476 break 28477 } 28478 i0 := x0.AuxInt 28479 s := x0.Aux 28480 _ = x0.Args[2] 28481 p := x0.Args[0] 28482 idx := x0.Args[1] 28483 mem := x0.Args[2] 28484 y := or.Args[1] 28485 s1 := v.Args[1] 28486 if s1.Op != OpAMD64SHLQconst { 28487 break 28488 } 28489 j1 := s1.AuxInt 28490 x1 := s1.Args[0] 28491 if x1.Op != OpAMD64MOVWloadidx1 { 28492 break 28493 } 28494 i1 := x1.AuxInt 28495 if x1.Aux != s { 28496 break 28497 } 28498 _ = x1.Args[2] 28499 if p != x1.Args[0] { 28500 break 28501 } 28502 if idx != x1.Args[1] { 28503 break 28504 } 28505 if mem != x1.Args[2] { 28506 break 28507 } 28508 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28509 break 28510 } 28511 b = mergePoint(b, x0, x1) 28512 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28513 v.reset(OpCopy) 28514 v.AddArg(v0) 28515 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28516 v1.AuxInt = j0 28517 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28518 v2.AuxInt = i0 28519 v2.Aux = s 28520 v2.AddArg(p) 28521 v2.AddArg(idx) 28522 v2.AddArg(mem) 28523 v1.AddArg(v2) 28524 v0.AddArg(v1) 28525 v0.AddArg(y) 28526 return true 28527 } 28528 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28529 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28530 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28531 for { 28532 _ = v.Args[1] 28533 or := v.Args[0] 28534 if or.Op != OpAMD64ORQ { 28535 break 28536 } 28537 _ = or.Args[1] 28538 s0 := or.Args[0] 28539 if s0.Op != OpAMD64SHLQconst { 28540 break 28541 } 28542 j0 := s0.AuxInt 28543 x0 := s0.Args[0] 28544 if x0.Op != OpAMD64MOVWloadidx1 { 28545 break 28546 } 28547 i0 := x0.AuxInt 28548 s := x0.Aux 28549 _ = x0.Args[2] 28550 idx := x0.Args[0] 28551 p := x0.Args[1] 28552 mem := x0.Args[2] 28553 y := or.Args[1] 28554 s1 := v.Args[1] 28555 if s1.Op != OpAMD64SHLQconst { 28556 break 28557 } 28558 j1 := s1.AuxInt 28559 x1 := s1.Args[0] 28560 
if x1.Op != OpAMD64MOVWloadidx1 { 28561 break 28562 } 28563 i1 := x1.AuxInt 28564 if x1.Aux != s { 28565 break 28566 } 28567 _ = x1.Args[2] 28568 if p != x1.Args[0] { 28569 break 28570 } 28571 if idx != x1.Args[1] { 28572 break 28573 } 28574 if mem != x1.Args[2] { 28575 break 28576 } 28577 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28578 break 28579 } 28580 b = mergePoint(b, x0, x1) 28581 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28582 v.reset(OpCopy) 28583 v.AddArg(v0) 28584 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28585 v1.AuxInt = j0 28586 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28587 v2.AuxInt = i0 28588 v2.Aux = s 28589 v2.AddArg(p) 28590 v2.AddArg(idx) 28591 v2.AddArg(mem) 28592 v1.AddArg(v2) 28593 v0.AddArg(v1) 28594 v0.AddArg(y) 28595 return true 28596 } 28597 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28598 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28599 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28600 for { 28601 _ = v.Args[1] 28602 or := v.Args[0] 28603 if or.Op != OpAMD64ORQ { 28604 break 28605 } 28606 _ = or.Args[1] 28607 y := or.Args[0] 28608 s0 := or.Args[1] 28609 if s0.Op != OpAMD64SHLQconst { 28610 break 28611 } 28612 j0 := s0.AuxInt 28613 x0 := s0.Args[0] 28614 if x0.Op != OpAMD64MOVWloadidx1 { 28615 break 28616 } 28617 i0 := x0.AuxInt 28618 s := x0.Aux 28619 _ = x0.Args[2] 28620 p := x0.Args[0] 28621 idx := x0.Args[1] 28622 mem := x0.Args[2] 28623 s1 := v.Args[1] 28624 if s1.Op != OpAMD64SHLQconst { 28625 break 28626 } 28627 j1 := s1.AuxInt 28628 x1 := s1.Args[0] 28629 if x1.Op != OpAMD64MOVWloadidx1 { 28630 break 28631 } 28632 i1 := x1.AuxInt 28633 if x1.Aux != s { 28634 break 28635 } 28636 _ = x1.Args[2] 28637 if p != x1.Args[0] { 28638 break 28639 } 28640 if idx != x1.Args[1] { 28641 break 28642 } 28643 if mem != x1.Args[2] { 28644 break 28645 } 28646 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28647 break 28648 } 28649 b = mergePoint(b, x0, x1) 28650 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28651 v.reset(OpCopy) 28652 v.AddArg(v0) 28653 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28654 v1.AuxInt = j0 28655 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28656 v2.AuxInt = i0 28657 v2.Aux = s 28658 v2.AddArg(p) 28659 v2.AddArg(idx) 28660 v2.AddArg(mem) 28661 v1.AddArg(v2) 28662 v0.AddArg(v1) 28663 v0.AddArg(y) 28664 return true 28665 } 28666 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28667 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28668 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> 
[j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28669 for { 28670 _ = v.Args[1] 28671 or := v.Args[0] 28672 if or.Op != OpAMD64ORQ { 28673 break 28674 } 28675 _ = or.Args[1] 28676 y := or.Args[0] 28677 s0 := or.Args[1] 28678 if s0.Op != OpAMD64SHLQconst { 28679 break 28680 } 28681 j0 := s0.AuxInt 28682 x0 := s0.Args[0] 28683 if x0.Op != OpAMD64MOVWloadidx1 { 28684 break 28685 } 28686 i0 := x0.AuxInt 28687 s := x0.Aux 28688 _ = x0.Args[2] 28689 idx := x0.Args[0] 28690 p := x0.Args[1] 28691 mem := x0.Args[2] 28692 s1 := v.Args[1] 28693 if s1.Op != OpAMD64SHLQconst { 28694 break 28695 } 28696 j1 := s1.AuxInt 28697 x1 := s1.Args[0] 28698 if x1.Op != OpAMD64MOVWloadidx1 { 28699 break 28700 } 28701 i1 := x1.AuxInt 28702 if x1.Aux != s { 28703 break 28704 } 28705 _ = x1.Args[2] 28706 if p != x1.Args[0] { 28707 break 28708 } 28709 if idx != x1.Args[1] { 28710 break 28711 } 28712 if mem != x1.Args[2] { 28713 break 28714 } 28715 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28716 break 28717 } 28718 b = mergePoint(b, x0, x1) 28719 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28720 v.reset(OpCopy) 28721 v.AddArg(v0) 28722 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28723 v1.AuxInt = j0 28724 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28725 v2.AuxInt = i0 28726 v2.Aux = s 28727 v2.AddArg(p) 28728 v2.AddArg(idx) 28729 v2.AddArg(mem) 28730 v1.AddArg(v2) 28731 v0.AddArg(v1) 28732 v0.AddArg(y) 28733 return true 28734 } 28735 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28736 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28737 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28738 for { 28739 _ = v.Args[1] 28740 or := v.Args[0] 28741 if or.Op != OpAMD64ORQ { 28742 break 28743 } 28744 _ = or.Args[1] 28745 s0 := or.Args[0] 28746 if s0.Op != OpAMD64SHLQconst { 28747 break 28748 } 28749 j0 := s0.AuxInt 28750 x0 := s0.Args[0] 28751 if x0.Op != OpAMD64MOVWloadidx1 { 28752 break 28753 } 28754 i0 := x0.AuxInt 28755 s := x0.Aux 28756 _ = x0.Args[2] 28757 p := x0.Args[0] 28758 idx := x0.Args[1] 28759 mem := x0.Args[2] 28760 y := or.Args[1] 28761 s1 := v.Args[1] 28762 if s1.Op != OpAMD64SHLQconst { 28763 break 28764 } 28765 j1 := s1.AuxInt 28766 x1 := s1.Args[0] 28767 if x1.Op != OpAMD64MOVWloadidx1 { 28768 break 28769 } 28770 i1 := x1.AuxInt 28771 if x1.Aux != s { 28772 break 28773 } 28774 _ = x1.Args[2] 28775 if idx != x1.Args[0] { 28776 break 28777 } 28778 if p != x1.Args[1] { 28779 break 28780 } 28781 if mem != x1.Args[2] { 28782 break 28783 } 28784 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28785 break 28786 } 28787 b = mergePoint(b, x0, x1) 28788 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28789 v.reset(OpCopy) 28790 v.AddArg(v0) 28791 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28792 v1.AuxInt = j0 28793 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28794 
v2.AuxInt = i0 28795 v2.Aux = s 28796 v2.AddArg(p) 28797 v2.AddArg(idx) 28798 v2.AddArg(mem) 28799 v1.AddArg(v2) 28800 v0.AddArg(v1) 28801 v0.AddArg(y) 28802 return true 28803 } 28804 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28805 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28806 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28807 for { 28808 _ = v.Args[1] 28809 or := v.Args[0] 28810 if or.Op != OpAMD64ORQ { 28811 break 28812 } 28813 _ = or.Args[1] 28814 s0 := or.Args[0] 28815 if s0.Op != OpAMD64SHLQconst { 28816 break 28817 } 28818 j0 := s0.AuxInt 28819 x0 := s0.Args[0] 28820 if x0.Op != OpAMD64MOVWloadidx1 { 28821 break 28822 } 28823 i0 := x0.AuxInt 28824 s := x0.Aux 28825 _ = x0.Args[2] 28826 idx := x0.Args[0] 28827 p := x0.Args[1] 28828 mem := x0.Args[2] 28829 y := or.Args[1] 28830 s1 := v.Args[1] 28831 if s1.Op != OpAMD64SHLQconst { 28832 break 28833 } 28834 j1 := s1.AuxInt 28835 x1 := s1.Args[0] 28836 if x1.Op != OpAMD64MOVWloadidx1 { 28837 break 28838 } 28839 i1 := x1.AuxInt 28840 if x1.Aux != s { 28841 break 28842 } 28843 _ = x1.Args[2] 28844 if idx != x1.Args[0] { 28845 break 28846 } 28847 if p != x1.Args[1] { 28848 break 28849 } 28850 if mem != x1.Args[2] { 28851 break 28852 } 28853 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28854 break 28855 } 28856 b = mergePoint(b, x0, x1) 28857 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28858 v.reset(OpCopy) 28859 v.AddArg(v0) 28860 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28861 v1.AuxInt = j0 28862 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28863 v2.AuxInt = i0 28864 v2.Aux = s 28865 v2.AddArg(p) 28866 v2.AddArg(idx) 28867 v2.AddArg(mem) 28868 v1.AddArg(v2) 28869 v0.AddArg(v1) 28870 v0.AddArg(y) 28871 return true 28872 } 28873 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28874 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28875 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28876 for { 28877 _ = v.Args[1] 28878 or := v.Args[0] 28879 if or.Op != OpAMD64ORQ { 28880 break 28881 } 28882 _ = or.Args[1] 28883 y := or.Args[0] 28884 s0 := or.Args[1] 28885 if s0.Op != OpAMD64SHLQconst { 28886 break 28887 } 28888 j0 := s0.AuxInt 28889 x0 := s0.Args[0] 28890 if x0.Op != OpAMD64MOVWloadidx1 { 28891 break 28892 } 28893 i0 := x0.AuxInt 28894 s := x0.Aux 28895 _ = x0.Args[2] 28896 p := x0.Args[0] 28897 idx := x0.Args[1] 28898 mem := x0.Args[2] 28899 s1 := v.Args[1] 28900 if s1.Op != OpAMD64SHLQconst { 28901 break 28902 } 28903 j1 := s1.AuxInt 28904 x1 := s1.Args[0] 28905 if x1.Op != OpAMD64MOVWloadidx1 { 28906 break 28907 } 28908 i1 := x1.AuxInt 28909 if x1.Aux != s { 28910 break 28911 } 28912 _ = x1.Args[2] 28913 if idx != x1.Args[0] { 28914 break 
28915 } 28916 if p != x1.Args[1] { 28917 break 28918 } 28919 if mem != x1.Args[2] { 28920 break 28921 } 28922 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28923 break 28924 } 28925 b = mergePoint(b, x0, x1) 28926 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28927 v.reset(OpCopy) 28928 v.AddArg(v0) 28929 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28930 v1.AuxInt = j0 28931 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28932 v2.AuxInt = i0 28933 v2.Aux = s 28934 v2.AddArg(p) 28935 v2.AddArg(idx) 28936 v2.AddArg(mem) 28937 v1.AddArg(v2) 28938 v0.AddArg(v1) 28939 v0.AddArg(y) 28940 return true 28941 } 28942 return false 28943 } 28944 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { 28945 b := v.Block 28946 _ = b 28947 typ := &b.Func.Config.Types 28948 _ = typ 28949 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28950 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28951 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28952 for { 28953 _ = v.Args[1] 28954 or := v.Args[0] 28955 if or.Op != OpAMD64ORQ { 28956 break 28957 } 28958 _ = or.Args[1] 28959 y := or.Args[0] 28960 s0 := or.Args[1] 28961 if s0.Op != OpAMD64SHLQconst { 28962 break 28963 } 28964 j0 := s0.AuxInt 28965 x0 := s0.Args[0] 28966 if x0.Op != OpAMD64MOVWloadidx1 { 28967 break 28968 } 28969 i0 := x0.AuxInt 28970 s := x0.Aux 28971 _ = x0.Args[2] 28972 idx := x0.Args[0] 28973 p := x0.Args[1] 28974 mem := x0.Args[2] 28975 s1 := v.Args[1] 28976 if s1.Op != OpAMD64SHLQconst { 28977 break 28978 } 28979 j1 := s1.AuxInt 28980 x1 := s1.Args[0] 28981 if x1.Op != OpAMD64MOVWloadidx1 { 28982 break 28983 } 28984 i1 := x1.AuxInt 28985 if x1.Aux != s { 28986 break 28987 } 28988 _ = x1.Args[2] 28989 if idx != x1.Args[0] { 28990 break 28991 } 28992 if p != x1.Args[1] { 28993 break 28994 } 28995 if mem != x1.Args[2] { 28996 break 28997 } 28998 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28999 break 29000 } 29001 b = mergePoint(b, x0, x1) 29002 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29003 v.reset(OpCopy) 29004 v.AddArg(v0) 29005 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29006 v1.AuxInt = j0 29007 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29008 v2.AuxInt = i0 29009 v2.Aux = s 29010 v2.AddArg(p) 29011 v2.AddArg(idx) 29012 v2.AddArg(mem) 29013 v1.AddArg(v2) 29014 v0.AddArg(v1) 29015 v0.AddArg(y) 29016 return true 29017 } 29018 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 29019 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29020 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 29021 for { 29022 _ = v.Args[1] 29023 x1 := v.Args[0] 29024 if x1.Op != OpAMD64MOVBload { 29025 break 29026 } 29027 i1 := x1.AuxInt 29028 s := 
x1.Aux 29029 _ = x1.Args[1] 29030 p := x1.Args[0] 29031 mem := x1.Args[1] 29032 sh := v.Args[1] 29033 if sh.Op != OpAMD64SHLQconst { 29034 break 29035 } 29036 if sh.AuxInt != 8 { 29037 break 29038 } 29039 x0 := sh.Args[0] 29040 if x0.Op != OpAMD64MOVBload { 29041 break 29042 } 29043 i0 := x0.AuxInt 29044 if x0.Aux != s { 29045 break 29046 } 29047 _ = x0.Args[1] 29048 if p != x0.Args[0] { 29049 break 29050 } 29051 if mem != x0.Args[1] { 29052 break 29053 } 29054 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29055 break 29056 } 29057 b = mergePoint(b, x0, x1) 29058 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 29059 v.reset(OpCopy) 29060 v.AddArg(v0) 29061 v0.AuxInt = 8 29062 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29063 v1.AuxInt = i0 29064 v1.Aux = s 29065 v1.AddArg(p) 29066 v1.AddArg(mem) 29067 v0.AddArg(v1) 29068 return true 29069 } 29070 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 29071 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29072 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 29073 for { 29074 _ = v.Args[1] 29075 sh := v.Args[0] 29076 if sh.Op != OpAMD64SHLQconst { 29077 break 29078 } 29079 if sh.AuxInt != 8 { 29080 break 29081 } 29082 x0 := sh.Args[0] 29083 if x0.Op != OpAMD64MOVBload { 29084 break 29085 } 29086 i0 := x0.AuxInt 29087 s := x0.Aux 29088 _ = x0.Args[1] 29089 p := x0.Args[0] 29090 mem := x0.Args[1] 29091 x1 := v.Args[1] 29092 if x1.Op != OpAMD64MOVBload { 29093 break 29094 } 29095 i1 := x1.AuxInt 29096 if x1.Aux != s { 29097 break 29098 } 29099 _ = x1.Args[1] 29100 if p != x1.Args[0] { 29101 break 29102 } 29103 if mem != x1.Args[1] { 29104 break 29105 } 29106 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29107 break 29108 } 29109 b = mergePoint(b, x0, x1) 29110 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 29111 v.reset(OpCopy) 29112 v.AddArg(v0) 29113 v0.AuxInt = 8 29114 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29115 v1.AuxInt = i0 29116 v1.Aux = s 29117 v1.AddArg(p) 29118 v1.AddArg(mem) 29119 v0.AddArg(v1) 29120 return true 29121 } 29122 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29123 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29124 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 29125 for { 29126 _ = v.Args[1] 29127 r1 := v.Args[0] 29128 if r1.Op != OpAMD64ROLWconst { 29129 break 29130 } 29131 if r1.AuxInt != 8 { 29132 break 29133 } 29134 x1 := r1.Args[0] 29135 if x1.Op != OpAMD64MOVWload { 29136 break 29137 } 29138 i1 := x1.AuxInt 29139 s := x1.Aux 29140 _ = x1.Args[1] 29141 p := x1.Args[0] 29142 mem := x1.Args[1] 29143 sh := v.Args[1] 29144 if sh.Op != OpAMD64SHLQconst { 29145 break 29146 } 29147 if sh.AuxInt != 16 { 29148 break 29149 } 29150 r0 := sh.Args[0] 29151 if r0.Op != OpAMD64ROLWconst { 29152 break 29153 } 29154 if r0.AuxInt != 8 { 29155 break 29156 } 29157 x0 := r0.Args[0] 29158 if x0.Op != OpAMD64MOVWload { 29159 break 29160 } 29161 i0 := x0.AuxInt 29162 if x0.Aux != 
s { 29163 break 29164 } 29165 _ = x0.Args[1] 29166 if p != x0.Args[0] { 29167 break 29168 } 29169 if mem != x0.Args[1] { 29170 break 29171 } 29172 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29173 break 29174 } 29175 b = mergePoint(b, x0, x1) 29176 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29177 v.reset(OpCopy) 29178 v.AddArg(v0) 29179 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29180 v1.AuxInt = i0 29181 v1.Aux = s 29182 v1.AddArg(p) 29183 v1.AddArg(mem) 29184 v0.AddArg(v1) 29185 return true 29186 } 29187 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 29188 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29189 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 29190 for { 29191 _ = v.Args[1] 29192 sh := v.Args[0] 29193 if sh.Op != OpAMD64SHLQconst { 29194 break 29195 } 29196 if sh.AuxInt != 16 { 29197 break 29198 } 29199 r0 := sh.Args[0] 29200 if r0.Op != OpAMD64ROLWconst { 29201 break 29202 } 29203 if r0.AuxInt != 8 { 29204 break 29205 } 29206 x0 := r0.Args[0] 29207 if x0.Op != OpAMD64MOVWload { 29208 break 29209 } 29210 i0 := x0.AuxInt 29211 s := x0.Aux 29212 _ = x0.Args[1] 29213 p := x0.Args[0] 29214 mem := x0.Args[1] 29215 r1 := v.Args[1] 29216 if r1.Op != OpAMD64ROLWconst { 29217 break 29218 } 29219 if r1.AuxInt != 8 { 29220 break 29221 } 29222 x1 := r1.Args[0] 29223 if x1.Op != OpAMD64MOVWload { 29224 break 29225 } 29226 i1 := x1.AuxInt 29227 if x1.Aux != s { 29228 break 29229 } 29230 _ = x1.Args[1] 29231 if p != x1.Args[0] { 29232 break 29233 } 29234 if mem != x1.Args[1] { 29235 break 29236 } 29237 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29238 break 29239 } 29240 b = mergePoint(b, x0, x1) 29241 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29242 v.reset(OpCopy) 29243 v.AddArg(v0) 29244 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29245 v1.AuxInt = i0 29246 v1.Aux = s 29247 v1.AddArg(p) 29248 v1.AddArg(mem) 29249 v0.AddArg(v1) 29250 return true 29251 } 29252 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 29253 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29254 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 29255 for { 29256 _ = v.Args[1] 29257 r1 := v.Args[0] 29258 if r1.Op != OpAMD64BSWAPL { 29259 break 29260 } 29261 x1 := r1.Args[0] 29262 if x1.Op != OpAMD64MOVLload { 29263 break 29264 } 29265 i1 := x1.AuxInt 29266 s := x1.Aux 29267 _ = x1.Args[1] 29268 p := x1.Args[0] 29269 mem := x1.Args[1] 29270 sh := v.Args[1] 29271 if sh.Op != OpAMD64SHLQconst { 29272 break 29273 } 29274 if sh.AuxInt != 32 { 29275 break 29276 } 29277 r0 := sh.Args[0] 29278 if r0.Op != OpAMD64BSWAPL { 29279 break 29280 } 29281 x0 := r0.Args[0] 29282 if x0.Op != OpAMD64MOVLload { 29283 break 29284 } 29285 i0 := x0.AuxInt 29286 if x0.Aux != s { 
29287 break 29288 } 29289 _ = x0.Args[1] 29290 if p != x0.Args[0] { 29291 break 29292 } 29293 if mem != x0.Args[1] { 29294 break 29295 } 29296 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29297 break 29298 } 29299 b = mergePoint(b, x0, x1) 29300 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29301 v.reset(OpCopy) 29302 v.AddArg(v0) 29303 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 29304 v1.AuxInt = i0 29305 v1.Aux = s 29306 v1.AddArg(p) 29307 v1.AddArg(mem) 29308 v0.AddArg(v1) 29309 return true 29310 } 29311 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 29312 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29313 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 29314 for { 29315 _ = v.Args[1] 29316 sh := v.Args[0] 29317 if sh.Op != OpAMD64SHLQconst { 29318 break 29319 } 29320 if sh.AuxInt != 32 { 29321 break 29322 } 29323 r0 := sh.Args[0] 29324 if r0.Op != OpAMD64BSWAPL { 29325 break 29326 } 29327 x0 := r0.Args[0] 29328 if x0.Op != OpAMD64MOVLload { 29329 break 29330 } 29331 i0 := x0.AuxInt 29332 s := x0.Aux 29333 _ = x0.Args[1] 29334 p := x0.Args[0] 29335 mem := x0.Args[1] 29336 r1 := v.Args[1] 29337 if r1.Op != OpAMD64BSWAPL { 29338 break 29339 } 29340 x1 := r1.Args[0] 29341 if x1.Op != OpAMD64MOVLload { 29342 break 29343 } 29344 i1 := x1.AuxInt 29345 if x1.Aux != s { 29346 break 29347 } 29348 _ = x1.Args[1] 29349 if p != x1.Args[0] { 29350 break 29351 } 29352 if mem != x1.Args[1] { 29353 break 29354 } 29355 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29356 break 29357 } 29358 b = mergePoint(b, x0, x1) 29359 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29360 v.reset(OpCopy) 29361 v.AddArg(v0) 29362 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 29363 v1.AuxInt = i0 29364 v1.Aux = s 29365 v1.AddArg(p) 29366 v1.AddArg(mem) 29367 v0.AddArg(v1) 29368 return true 29369 } 29370 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 29371 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29372 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29373 for { 29374 _ = v.Args[1] 29375 s0 := v.Args[0] 29376 if s0.Op != OpAMD64SHLQconst { 29377 break 29378 } 29379 j0 := s0.AuxInt 29380 x0 := s0.Args[0] 29381 if x0.Op != OpAMD64MOVBload { 29382 break 29383 } 29384 i0 := x0.AuxInt 29385 s := x0.Aux 29386 _ = x0.Args[1] 29387 p := x0.Args[0] 29388 mem := x0.Args[1] 29389 or := v.Args[1] 29390 if or.Op != OpAMD64ORQ { 29391 break 29392 } 29393 _ = or.Args[1] 29394 s1 := or.Args[0] 29395 if s1.Op != OpAMD64SHLQconst { 29396 break 29397 } 29398 j1 := s1.AuxInt 29399 x1 := s1.Args[0] 29400 if x1.Op != OpAMD64MOVBload { 29401 break 29402 } 29403 i1 := x1.AuxInt 29404 if x1.Aux != s { 29405 
break 29406 } 29407 _ = x1.Args[1] 29408 if p != x1.Args[0] { 29409 break 29410 } 29411 if mem != x1.Args[1] { 29412 break 29413 } 29414 y := or.Args[1] 29415 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29416 break 29417 } 29418 b = mergePoint(b, x0, x1) 29419 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29420 v.reset(OpCopy) 29421 v.AddArg(v0) 29422 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29423 v1.AuxInt = j1 29424 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29425 v2.AuxInt = 8 29426 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29427 v3.AuxInt = i0 29428 v3.Aux = s 29429 v3.AddArg(p) 29430 v3.AddArg(mem) 29431 v2.AddArg(v3) 29432 v1.AddArg(v2) 29433 v0.AddArg(v1) 29434 v0.AddArg(y) 29435 return true 29436 } 29437 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 29438 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29439 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29440 for { 29441 _ = v.Args[1] 29442 s0 := v.Args[0] 29443 if s0.Op != OpAMD64SHLQconst { 29444 break 29445 } 29446 j0 := s0.AuxInt 29447 x0 := s0.Args[0] 29448 if x0.Op != OpAMD64MOVBload { 29449 break 29450 } 29451 i0 := x0.AuxInt 29452 s := x0.Aux 29453 _ = x0.Args[1] 29454 p := x0.Args[0] 29455 mem := x0.Args[1] 29456 or := v.Args[1] 29457 if or.Op != OpAMD64ORQ { 29458 break 29459 } 29460 _ = or.Args[1] 29461 y := or.Args[0] 29462 s1 := or.Args[1] 29463 if s1.Op != OpAMD64SHLQconst { 29464 break 29465 } 29466 j1 := s1.AuxInt 29467 x1 := s1.Args[0] 29468 if x1.Op != OpAMD64MOVBload { 29469 break 29470 } 29471 i1 := x1.AuxInt 29472 if x1.Aux != s { 29473 break 29474 } 29475 _ = x1.Args[1] 29476 if p != x1.Args[0] { 29477 break 29478 } 29479 if mem != x1.Args[1] { 29480 break 29481 } 29482 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29483 break 29484 } 29485 b = mergePoint(b, x0, x1) 29486 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29487 v.reset(OpCopy) 29488 v.AddArg(v0) 29489 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29490 v1.AuxInt = j1 29491 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29492 v2.AuxInt = 8 29493 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29494 v3.AuxInt = i0 29495 v3.Aux = s 29496 v3.AddArg(p) 29497 v3.AddArg(mem) 29498 v2.AddArg(v3) 29499 v1.AddArg(v2) 29500 v0.AddArg(v1) 29501 v0.AddArg(y) 29502 return true 29503 } 29504 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 29505 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29506 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29507 for { 
29508 _ = v.Args[1] 29509 or := v.Args[0] 29510 if or.Op != OpAMD64ORQ { 29511 break 29512 } 29513 _ = or.Args[1] 29514 s1 := or.Args[0] 29515 if s1.Op != OpAMD64SHLQconst { 29516 break 29517 } 29518 j1 := s1.AuxInt 29519 x1 := s1.Args[0] 29520 if x1.Op != OpAMD64MOVBload { 29521 break 29522 } 29523 i1 := x1.AuxInt 29524 s := x1.Aux 29525 _ = x1.Args[1] 29526 p := x1.Args[0] 29527 mem := x1.Args[1] 29528 y := or.Args[1] 29529 s0 := v.Args[1] 29530 if s0.Op != OpAMD64SHLQconst { 29531 break 29532 } 29533 j0 := s0.AuxInt 29534 x0 := s0.Args[0] 29535 if x0.Op != OpAMD64MOVBload { 29536 break 29537 } 29538 i0 := x0.AuxInt 29539 if x0.Aux != s { 29540 break 29541 } 29542 _ = x0.Args[1] 29543 if p != x0.Args[0] { 29544 break 29545 } 29546 if mem != x0.Args[1] { 29547 break 29548 } 29549 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29550 break 29551 } 29552 b = mergePoint(b, x0, x1) 29553 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29554 v.reset(OpCopy) 29555 v.AddArg(v0) 29556 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29557 v1.AuxInt = j1 29558 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29559 v2.AuxInt = 8 29560 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29561 v3.AuxInt = i0 29562 v3.Aux = s 29563 v3.AddArg(p) 29564 v3.AddArg(mem) 29565 v2.AddArg(v3) 29566 v1.AddArg(v2) 29567 v0.AddArg(v1) 29568 v0.AddArg(y) 29569 return true 29570 } 29571 return false 29572 } 29573 func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { 29574 b := v.Block 29575 _ = b 29576 typ := &b.Func.Config.Types 29577 _ = typ 29578 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 29579 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29580 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29581 for { 29582 _ = v.Args[1] 29583 or := v.Args[0] 29584 if or.Op != OpAMD64ORQ { 29585 break 29586 } 29587 _ = or.Args[1] 29588 y := or.Args[0] 29589 s1 := or.Args[1] 29590 if s1.Op != OpAMD64SHLQconst { 29591 break 29592 } 29593 j1 := s1.AuxInt 29594 x1 := s1.Args[0] 29595 if x1.Op != OpAMD64MOVBload { 29596 break 29597 } 29598 i1 := x1.AuxInt 29599 s := x1.Aux 29600 _ = x1.Args[1] 29601 p := x1.Args[0] 29602 mem := x1.Args[1] 29603 s0 := v.Args[1] 29604 if s0.Op != OpAMD64SHLQconst { 29605 break 29606 } 29607 j0 := s0.AuxInt 29608 x0 := s0.Args[0] 29609 if x0.Op != OpAMD64MOVBload { 29610 break 29611 } 29612 i0 := x0.AuxInt 29613 if x0.Aux != s { 29614 break 29615 } 29616 _ = x0.Args[1] 29617 if p != x0.Args[0] { 29618 break 29619 } 29620 if mem != x0.Args[1] { 29621 break 29622 } 29623 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29624 break 29625 } 29626 b = mergePoint(b, x0, x1) 29627 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29628 v.reset(OpCopy) 29629 v.AddArg(v0) 29630 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29631 v1.AuxInt = j1 29632 v2 := b.NewValue0(v.Pos, 
OpAMD64ROLWconst, typ.UInt16) 29633 v2.AuxInt = 8 29634 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29635 v3.AuxInt = i0 29636 v3.Aux = s 29637 v3.AddArg(p) 29638 v3.AddArg(mem) 29639 v2.AddArg(v3) 29640 v1.AddArg(v2) 29641 v0.AddArg(v1) 29642 v0.AddArg(y) 29643 return true 29644 } 29645 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 29646 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29647 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29648 for { 29649 _ = v.Args[1] 29650 s0 := v.Args[0] 29651 if s0.Op != OpAMD64SHLQconst { 29652 break 29653 } 29654 j0 := s0.AuxInt 29655 r0 := s0.Args[0] 29656 if r0.Op != OpAMD64ROLWconst { 29657 break 29658 } 29659 if r0.AuxInt != 8 { 29660 break 29661 } 29662 x0 := r0.Args[0] 29663 if x0.Op != OpAMD64MOVWload { 29664 break 29665 } 29666 i0 := x0.AuxInt 29667 s := x0.Aux 29668 _ = x0.Args[1] 29669 p := x0.Args[0] 29670 mem := x0.Args[1] 29671 or := v.Args[1] 29672 if or.Op != OpAMD64ORQ { 29673 break 29674 } 29675 _ = or.Args[1] 29676 s1 := or.Args[0] 29677 if s1.Op != OpAMD64SHLQconst { 29678 break 29679 } 29680 j1 := s1.AuxInt 29681 r1 := s1.Args[0] 29682 if r1.Op != OpAMD64ROLWconst { 29683 break 29684 } 29685 if r1.AuxInt != 8 { 29686 break 29687 } 29688 x1 := r1.Args[0] 29689 if x1.Op != OpAMD64MOVWload { 29690 break 29691 } 29692 i1 := x1.AuxInt 29693 if x1.Aux != s { 29694 break 29695 } 29696 _ = x1.Args[1] 29697 if p != x1.Args[0] { 29698 break 29699 } 29700 if mem != x1.Args[1] { 29701 break 29702 } 29703 y := or.Args[1] 29704 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29705 break 29706 } 29707 b = mergePoint(b, x0, x1) 29708 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29709 v.reset(OpCopy) 29710 v.AddArg(v0) 29711 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29712 v1.AuxInt = j1 29713 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29714 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29715 v3.AuxInt = i0 29716 v3.Aux = s 29717 v3.AddArg(p) 29718 v3.AddArg(mem) 29719 v2.AddArg(v3) 29720 v1.AddArg(v2) 29721 v0.AddArg(v1) 29722 v0.AddArg(y) 29723 return true 29724 } 29725 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 29726 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29727 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29728 for { 29729 _ = v.Args[1] 29730 s0 := v.Args[0] 29731 if s0.Op != OpAMD64SHLQconst { 29732 break 29733 } 29734 j0 := s0.AuxInt 29735 r0 := s0.Args[0] 29736 if r0.Op != 
OpAMD64ROLWconst { 29737 break 29738 } 29739 if r0.AuxInt != 8 { 29740 break 29741 } 29742 x0 := r0.Args[0] 29743 if x0.Op != OpAMD64MOVWload { 29744 break 29745 } 29746 i0 := x0.AuxInt 29747 s := x0.Aux 29748 _ = x0.Args[1] 29749 p := x0.Args[0] 29750 mem := x0.Args[1] 29751 or := v.Args[1] 29752 if or.Op != OpAMD64ORQ { 29753 break 29754 } 29755 _ = or.Args[1] 29756 y := or.Args[0] 29757 s1 := or.Args[1] 29758 if s1.Op != OpAMD64SHLQconst { 29759 break 29760 } 29761 j1 := s1.AuxInt 29762 r1 := s1.Args[0] 29763 if r1.Op != OpAMD64ROLWconst { 29764 break 29765 } 29766 if r1.AuxInt != 8 { 29767 break 29768 } 29769 x1 := r1.Args[0] 29770 if x1.Op != OpAMD64MOVWload { 29771 break 29772 } 29773 i1 := x1.AuxInt 29774 if x1.Aux != s { 29775 break 29776 } 29777 _ = x1.Args[1] 29778 if p != x1.Args[0] { 29779 break 29780 } 29781 if mem != x1.Args[1] { 29782 break 29783 } 29784 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29785 break 29786 } 29787 b = mergePoint(b, x0, x1) 29788 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29789 v.reset(OpCopy) 29790 v.AddArg(v0) 29791 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29792 v1.AuxInt = j1 29793 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29794 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29795 v3.AuxInt = i0 29796 v3.Aux = s 29797 v3.AddArg(p) 29798 v3.AddArg(mem) 29799 v2.AddArg(v3) 29800 v1.AddArg(v2) 29801 v0.AddArg(v1) 29802 v0.AddArg(y) 29803 return true 29804 } 29805 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29806 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29807 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29808 for { 29809 _ = v.Args[1] 29810 or := v.Args[0] 29811 if or.Op != OpAMD64ORQ { 29812 break 29813 } 29814 _ = or.Args[1] 29815 s1 := or.Args[0] 29816 if s1.Op != OpAMD64SHLQconst { 29817 break 29818 } 29819 j1 := s1.AuxInt 29820 r1 := s1.Args[0] 29821 if r1.Op != OpAMD64ROLWconst { 29822 break 29823 } 29824 if r1.AuxInt != 8 { 29825 break 29826 } 29827 x1 := r1.Args[0] 29828 if x1.Op != OpAMD64MOVWload { 29829 break 29830 } 29831 i1 := x1.AuxInt 29832 s := x1.Aux 29833 _ = x1.Args[1] 29834 p := x1.Args[0] 29835 mem := x1.Args[1] 29836 y := or.Args[1] 29837 s0 := v.Args[1] 29838 if s0.Op != OpAMD64SHLQconst { 29839 break 29840 } 29841 j0 := s0.AuxInt 29842 r0 := s0.Args[0] 29843 if r0.Op != OpAMD64ROLWconst { 29844 break 29845 } 29846 if r0.AuxInt != 8 { 29847 break 29848 } 29849 x0 := r0.Args[0] 29850 if x0.Op != OpAMD64MOVWload { 29851 break 29852 } 29853 i0 := x0.AuxInt 29854 if x0.Aux != s { 29855 break 29856 } 29857 _ = x0.Args[1] 29858 if p != x0.Args[0] { 29859 break 29860 } 29861 if mem != x0.Args[1] { 29862 break 29863 } 29864 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29865 break 29866 } 29867 b = mergePoint(b, x0, x1) 29868 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29869 v.reset(OpCopy) 29870 v.AddArg(v0) 29871 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29872 v1.AuxInt = j1 29873 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29874 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29875 v3.AuxInt = i0 29876 v3.Aux = s 29877 v3.AddArg(p) 29878 v3.AddArg(mem) 29879 v2.AddArg(v3) 29880 v1.AddArg(v2) 29881 v0.AddArg(v1) 29882 v0.AddArg(y) 29883 return true 29884 } 29885 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29886 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29887 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29888 for { 29889 _ = v.Args[1] 29890 or := v.Args[0] 29891 if or.Op != OpAMD64ORQ { 29892 break 29893 } 29894 _ = or.Args[1] 29895 y := or.Args[0] 29896 s1 := or.Args[1] 29897 if s1.Op != OpAMD64SHLQconst { 29898 break 29899 } 29900 j1 := s1.AuxInt 29901 r1 := s1.Args[0] 29902 if r1.Op != OpAMD64ROLWconst { 29903 break 29904 } 29905 if r1.AuxInt != 8 { 29906 break 29907 } 29908 x1 := r1.Args[0] 29909 if x1.Op != OpAMD64MOVWload { 29910 break 29911 } 29912 i1 := x1.AuxInt 29913 s := x1.Aux 29914 _ = x1.Args[1] 29915 p := x1.Args[0] 29916 mem := x1.Args[1] 29917 s0 := v.Args[1] 29918 if s0.Op != OpAMD64SHLQconst { 29919 break 29920 } 29921 j0 := s0.AuxInt 29922 r0 := s0.Args[0] 29923 if r0.Op != OpAMD64ROLWconst { 29924 break 29925 } 29926 if r0.AuxInt != 8 { 29927 break 29928 } 29929 x0 := r0.Args[0] 29930 if x0.Op != OpAMD64MOVWload { 29931 break 29932 } 29933 i0 := x0.AuxInt 29934 if x0.Aux != s { 29935 break 29936 } 29937 _ = x0.Args[1] 29938 if p != x0.Args[0] { 29939 break 29940 } 29941 if mem != x0.Args[1] { 29942 break 29943 } 29944 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29945 break 29946 } 29947 b = mergePoint(b, x0, x1) 29948 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29949 v.reset(OpCopy) 29950 v.AddArg(v0) 29951 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29952 v1.AuxInt = j1 29953 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29954 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29955 v3.AuxInt = i0 29956 v3.Aux = s 29957 v3.AddArg(p) 29958 v3.AddArg(mem) 29959 v2.AddArg(v3) 29960 v1.AddArg(v2) 29961 v0.AddArg(v1) 29962 v0.AddArg(y) 29963 return true 29964 } 29965 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 29966 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29967 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 29968 for { 29969 _ = v.Args[1] 29970 x1 := v.Args[0] 29971 if x1.Op != 
OpAMD64MOVBloadidx1 { 29972 break 29973 } 29974 i1 := x1.AuxInt 29975 s := x1.Aux 29976 _ = x1.Args[2] 29977 p := x1.Args[0] 29978 idx := x1.Args[1] 29979 mem := x1.Args[2] 29980 sh := v.Args[1] 29981 if sh.Op != OpAMD64SHLQconst { 29982 break 29983 } 29984 if sh.AuxInt != 8 { 29985 break 29986 } 29987 x0 := sh.Args[0] 29988 if x0.Op != OpAMD64MOVBloadidx1 { 29989 break 29990 } 29991 i0 := x0.AuxInt 29992 if x0.Aux != s { 29993 break 29994 } 29995 _ = x0.Args[2] 29996 if p != x0.Args[0] { 29997 break 29998 } 29999 if idx != x0.Args[1] { 30000 break 30001 } 30002 if mem != x0.Args[2] { 30003 break 30004 } 30005 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30006 break 30007 } 30008 b = mergePoint(b, x0, x1) 30009 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30010 v.reset(OpCopy) 30011 v.AddArg(v0) 30012 v0.AuxInt = 8 30013 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30014 v1.AuxInt = i0 30015 v1.Aux = s 30016 v1.AddArg(p) 30017 v1.AddArg(idx) 30018 v1.AddArg(mem) 30019 v0.AddArg(v1) 30020 return true 30021 } 30022 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30023 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30024 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30025 for { 30026 _ = v.Args[1] 30027 x1 := v.Args[0] 30028 if x1.Op != OpAMD64MOVBloadidx1 { 30029 break 30030 } 30031 i1 := x1.AuxInt 30032 s := x1.Aux 30033 _ = x1.Args[2] 30034 idx := x1.Args[0] 30035 p := x1.Args[1] 30036 mem := x1.Args[2] 30037 sh := v.Args[1] 30038 if sh.Op != OpAMD64SHLQconst { 30039 break 30040 } 30041 if sh.AuxInt != 8 { 30042 break 30043 } 30044 x0 := sh.Args[0] 30045 if x0.Op != OpAMD64MOVBloadidx1 { 30046 break 30047 } 30048 i0 := x0.AuxInt 30049 if x0.Aux != s { 30050 break 30051 } 30052 _ = x0.Args[2] 30053 if p != x0.Args[0] { 30054 break 30055 } 30056 if idx != x0.Args[1] { 30057 break 30058 } 30059 if mem != x0.Args[2] { 30060 break 30061 } 30062 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30063 break 30064 } 30065 b = mergePoint(b, x0, x1) 30066 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30067 v.reset(OpCopy) 30068 v.AddArg(v0) 30069 v0.AuxInt = 8 30070 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30071 v1.AuxInt = i0 30072 v1.Aux = s 30073 v1.AddArg(p) 30074 v1.AddArg(idx) 30075 v1.AddArg(mem) 30076 v0.AddArg(v1) 30077 return true 30078 } 30079 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30080 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30081 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30082 for { 30083 _ = v.Args[1] 30084 x1 := v.Args[0] 30085 if x1.Op != OpAMD64MOVBloadidx1 { 30086 break 30087 } 30088 i1 := x1.AuxInt 30089 s := x1.Aux 30090 _ = x1.Args[2] 30091 p := x1.Args[0] 30092 idx := x1.Args[1] 30093 mem := x1.Args[2] 30094 sh := v.Args[1] 30095 if sh.Op != OpAMD64SHLQconst { 30096 break 30097 } 30098 if sh.AuxInt != 8 { 30099 break 30100 } 30101 x0 := sh.Args[0] 30102 if x0.Op != OpAMD64MOVBloadidx1 { 30103 break 30104 } 30105 i0 := x0.AuxInt 
30106 if x0.Aux != s { 30107 break 30108 } 30109 _ = x0.Args[2] 30110 if idx != x0.Args[0] { 30111 break 30112 } 30113 if p != x0.Args[1] { 30114 break 30115 } 30116 if mem != x0.Args[2] { 30117 break 30118 } 30119 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30120 break 30121 } 30122 b = mergePoint(b, x0, x1) 30123 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30124 v.reset(OpCopy) 30125 v.AddArg(v0) 30126 v0.AuxInt = 8 30127 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30128 v1.AuxInt = i0 30129 v1.Aux = s 30130 v1.AddArg(p) 30131 v1.AddArg(idx) 30132 v1.AddArg(mem) 30133 v0.AddArg(v1) 30134 return true 30135 } 30136 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30137 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30138 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30139 for { 30140 _ = v.Args[1] 30141 x1 := v.Args[0] 30142 if x1.Op != OpAMD64MOVBloadidx1 { 30143 break 30144 } 30145 i1 := x1.AuxInt 30146 s := x1.Aux 30147 _ = x1.Args[2] 30148 idx := x1.Args[0] 30149 p := x1.Args[1] 30150 mem := x1.Args[2] 30151 sh := v.Args[1] 30152 if sh.Op != OpAMD64SHLQconst { 30153 break 30154 } 30155 if sh.AuxInt != 8 { 30156 break 30157 } 30158 x0 := sh.Args[0] 30159 if x0.Op != OpAMD64MOVBloadidx1 { 30160 break 30161 } 30162 i0 := x0.AuxInt 30163 if x0.Aux != s { 30164 break 30165 } 30166 _ = x0.Args[2] 30167 if idx != x0.Args[0] { 30168 break 30169 } 30170 if p != x0.Args[1] { 30171 break 30172 } 30173 if mem != x0.Args[2] { 30174 break 30175 } 30176 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30177 break 30178 } 30179 b = mergePoint(b, x0, x1) 30180 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30181 v.reset(OpCopy) 30182 v.AddArg(v0) 30183 v0.AuxInt = 8 30184 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30185 v1.AuxInt = i0 30186 v1.Aux = s 30187 v1.AddArg(p) 30188 v1.AddArg(idx) 30189 v1.AddArg(mem) 30190 v0.AddArg(v1) 30191 return true 30192 } 30193 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 30194 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30195 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30196 for { 30197 _ = v.Args[1] 30198 sh := v.Args[0] 30199 if sh.Op != OpAMD64SHLQconst { 30200 break 30201 } 30202 if sh.AuxInt != 8 { 30203 break 30204 } 30205 x0 := sh.Args[0] 30206 if x0.Op != OpAMD64MOVBloadidx1 { 30207 break 30208 } 30209 i0 := x0.AuxInt 30210 s := x0.Aux 30211 _ = x0.Args[2] 30212 p := x0.Args[0] 30213 idx := x0.Args[1] 30214 mem := x0.Args[2] 30215 x1 := v.Args[1] 30216 if x1.Op != OpAMD64MOVBloadidx1 { 30217 break 30218 } 30219 i1 := x1.AuxInt 30220 if x1.Aux != s { 30221 break 30222 } 30223 _ = x1.Args[2] 30224 if p != x1.Args[0] { 30225 break 30226 } 30227 if idx != x1.Args[1] { 30228 break 30229 } 30230 if mem != x1.Args[2] { 30231 break 30232 } 30233 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30234 break 30235 } 30236 b = mergePoint(b, x0, 
x1) 30237 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30238 v.reset(OpCopy) 30239 v.AddArg(v0) 30240 v0.AuxInt = 8 30241 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30242 v1.AuxInt = i0 30243 v1.Aux = s 30244 v1.AddArg(p) 30245 v1.AddArg(idx) 30246 v1.AddArg(mem) 30247 v0.AddArg(v1) 30248 return true 30249 } 30250 return false 30251 } 30252 func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool { 30253 b := v.Block 30254 _ = b 30255 typ := &b.Func.Config.Types 30256 _ = typ 30257 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 30258 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30259 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30260 for { 30261 _ = v.Args[1] 30262 sh := v.Args[0] 30263 if sh.Op != OpAMD64SHLQconst { 30264 break 30265 } 30266 if sh.AuxInt != 8 { 30267 break 30268 } 30269 x0 := sh.Args[0] 30270 if x0.Op != OpAMD64MOVBloadidx1 { 30271 break 30272 } 30273 i0 := x0.AuxInt 30274 s := x0.Aux 30275 _ = x0.Args[2] 30276 idx := x0.Args[0] 30277 p := x0.Args[1] 30278 mem := x0.Args[2] 30279 x1 := v.Args[1] 30280 if x1.Op != OpAMD64MOVBloadidx1 { 30281 break 30282 } 30283 i1 := x1.AuxInt 30284 if x1.Aux != s { 30285 break 30286 } 30287 _ = x1.Args[2] 30288 if p != x1.Args[0] { 30289 break 30290 } 30291 if idx != x1.Args[1] { 30292 break 30293 } 30294 if mem != x1.Args[2] { 30295 break 30296 } 30297 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30298 break 30299 } 30300 b = mergePoint(b, x0, x1) 30301 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30302 v.reset(OpCopy) 30303 v.AddArg(v0) 30304 v0.AuxInt = 8 30305 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30306 v1.AuxInt = i0 30307 v1.Aux = s 30308 v1.AddArg(p) 30309 v1.AddArg(idx) 30310 v1.AddArg(mem) 30311 v0.AddArg(v1) 30312 return true 30313 } 30314 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 30315 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30316 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30317 for { 30318 _ = v.Args[1] 30319 sh := v.Args[0] 30320 if sh.Op != OpAMD64SHLQconst { 30321 break 30322 } 30323 if sh.AuxInt != 8 { 30324 break 30325 } 30326 x0 := sh.Args[0] 30327 if x0.Op != OpAMD64MOVBloadidx1 { 30328 break 30329 } 30330 i0 := x0.AuxInt 30331 s := x0.Aux 30332 _ = x0.Args[2] 30333 p := x0.Args[0] 30334 idx := x0.Args[1] 30335 mem := x0.Args[2] 30336 x1 := v.Args[1] 30337 if x1.Op != OpAMD64MOVBloadidx1 { 30338 break 30339 } 30340 i1 := x1.AuxInt 30341 if x1.Aux != s { 30342 break 30343 } 30344 _ = x1.Args[2] 30345 if idx != x1.Args[0] { 30346 break 30347 } 30348 if p != x1.Args[1] { 30349 break 30350 } 30351 if mem != x1.Args[2] { 30352 break 30353 } 30354 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30355 break 30356 } 30357 b = mergePoint(b, x0, x1) 30358 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30359 v.reset(OpCopy) 30360 v.AddArg(v0) 30361 v0.AuxInt = 8 30362 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30363 v1.AuxInt = i0 30364 v1.Aux = s 30365 
v1.AddArg(p) 30366 v1.AddArg(idx) 30367 v1.AddArg(mem) 30368 v0.AddArg(v1) 30369 return true 30370 } 30371 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 30372 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30373 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30374 for { 30375 _ = v.Args[1] 30376 sh := v.Args[0] 30377 if sh.Op != OpAMD64SHLQconst { 30378 break 30379 } 30380 if sh.AuxInt != 8 { 30381 break 30382 } 30383 x0 := sh.Args[0] 30384 if x0.Op != OpAMD64MOVBloadidx1 { 30385 break 30386 } 30387 i0 := x0.AuxInt 30388 s := x0.Aux 30389 _ = x0.Args[2] 30390 idx := x0.Args[0] 30391 p := x0.Args[1] 30392 mem := x0.Args[2] 30393 x1 := v.Args[1] 30394 if x1.Op != OpAMD64MOVBloadidx1 { 30395 break 30396 } 30397 i1 := x1.AuxInt 30398 if x1.Aux != s { 30399 break 30400 } 30401 _ = x1.Args[2] 30402 if idx != x1.Args[0] { 30403 break 30404 } 30405 if p != x1.Args[1] { 30406 break 30407 } 30408 if mem != x1.Args[2] { 30409 break 30410 } 30411 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30412 break 30413 } 30414 b = mergePoint(b, x0, x1) 30415 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30416 v.reset(OpCopy) 30417 v.AddArg(v0) 30418 v0.AuxInt = 8 30419 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30420 v1.AuxInt = i0 30421 v1.Aux = s 30422 v1.AddArg(p) 30423 v1.AddArg(idx) 30424 v1.AddArg(mem) 30425 v0.AddArg(v1) 30426 return true 30427 } 30428 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 30429 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30430 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30431 for { 30432 _ = v.Args[1] 30433 r1 := v.Args[0] 30434 if r1.Op != OpAMD64ROLWconst { 30435 break 30436 } 30437 if r1.AuxInt != 8 { 30438 break 30439 } 30440 x1 := r1.Args[0] 30441 if x1.Op != OpAMD64MOVWloadidx1 { 30442 break 30443 } 30444 i1 := x1.AuxInt 30445 s := x1.Aux 30446 _ = x1.Args[2] 30447 p := x1.Args[0] 30448 idx := x1.Args[1] 30449 mem := x1.Args[2] 30450 sh := v.Args[1] 30451 if sh.Op != OpAMD64SHLQconst { 30452 break 30453 } 30454 if sh.AuxInt != 16 { 30455 break 30456 } 30457 r0 := sh.Args[0] 30458 if r0.Op != OpAMD64ROLWconst { 30459 break 30460 } 30461 if r0.AuxInt != 8 { 30462 break 30463 } 30464 x0 := r0.Args[0] 30465 if x0.Op != OpAMD64MOVWloadidx1 { 30466 break 30467 } 30468 i0 := x0.AuxInt 30469 if x0.Aux != s { 30470 break 30471 } 30472 _ = x0.Args[2] 30473 if p != x0.Args[0] { 30474 break 30475 } 30476 if idx != x0.Args[1] { 30477 break 30478 } 30479 if mem != x0.Args[2] { 30480 break 30481 } 30482 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30483 break 30484 } 30485 b = mergePoint(b, x0, x1) 30486 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30487 v.reset(OpCopy) 30488 v.AddArg(v0) 30489 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30490 v1.AuxInt = i0 30491 v1.Aux = s 30492 v1.AddArg(p) 30493 
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
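		// NOTE: two byte-swapped 4-byte loads at i0 and i0+4 fuse into one
		// 8-byte load under a single BSWAPQ - roughly the shape of a
		// binary.BigEndian.Uint64 read over an indexed address.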
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
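		// NOTE: v is reset to an OpCopy of v0 because the replacement value is
		// built at mergePoint(b, x0, x1), which may be a different block than
		// the one v lives in.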
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
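		// NOTE: the replacement is chained bottom-up: v3 (MOVWloadidx1) feeds
		// v2 (ROLWconst [8]), which feeds v1 (SHLQconst [j1]), which is ORed
		// with the untouched operand y in v0.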
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
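		// NOTE: only the two shifted byte loads are fused here; y, the
		// unrelated operand of the inner ORQ, is re-attached to the rebuilt
		// ORQ (v0) below.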
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
32414 } 32415 j0 := s0.AuxInt 32416 x0 := s0.Args[0] 32417 if x0.Op != OpAMD64MOVBloadidx1 { 32418 break 32419 } 32420 i0 := x0.AuxInt 32421 if x0.Aux != s { 32422 break 32423 } 32424 _ = x0.Args[2] 32425 if idx != x0.Args[0] { 32426 break 32427 } 32428 if p != x0.Args[1] { 32429 break 32430 } 32431 if mem != x0.Args[2] { 32432 break 32433 } 32434 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32435 break 32436 } 32437 b = mergePoint(b, x0, x1) 32438 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32439 v.reset(OpCopy) 32440 v.AddArg(v0) 32441 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32442 v1.AuxInt = j1 32443 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32444 v2.AuxInt = 8 32445 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32446 v3.AuxInt = i0 32447 v3.Aux = s 32448 v3.AddArg(p) 32449 v3.AddArg(idx) 32450 v3.AddArg(mem) 32451 v2.AddArg(v3) 32452 v1.AddArg(v2) 32453 v0.AddArg(v1) 32454 v0.AddArg(y) 32455 return true 32456 } 32457 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 32458 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32459 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32460 for { 32461 _ = v.Args[1] 32462 or := v.Args[0] 32463 if or.Op != OpAMD64ORQ { 32464 break 32465 } 32466 _ = or.Args[1] 32467 s1 := or.Args[0] 32468 if s1.Op != OpAMD64SHLQconst { 32469 break 32470 } 32471 j1 := s1.AuxInt 32472 x1 := s1.Args[0] 32473 if x1.Op != OpAMD64MOVBloadidx1 { 32474 break 32475 } 32476 i1 := x1.AuxInt 32477 s := x1.Aux 32478 _ = x1.Args[2] 32479 idx := x1.Args[0] 32480 p := x1.Args[1] 32481 mem := x1.Args[2] 32482 y := or.Args[1] 32483 s0 := v.Args[1] 32484 if s0.Op != OpAMD64SHLQconst { 32485 break 32486 } 32487 j0 := s0.AuxInt 32488 x0 := s0.Args[0] 32489 if x0.Op != OpAMD64MOVBloadidx1 { 32490 break 32491 } 32492 i0 := x0.AuxInt 32493 if x0.Aux != s { 32494 break 32495 } 32496 _ = x0.Args[2] 32497 if idx != x0.Args[0] { 32498 break 32499 } 32500 if p != x0.Args[1] { 32501 break 32502 } 32503 if mem != x0.Args[2] { 32504 break 32505 } 32506 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32507 break 32508 } 32509 b = mergePoint(b, x0, x1) 32510 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32511 v.reset(OpCopy) 32512 v.AddArg(v0) 32513 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32514 v1.AuxInt = j1 32515 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32516 v2.AuxInt = 8 32517 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32518 v3.AuxInt = i0 32519 v3.Aux = s 32520 v3.AddArg(p) 32521 v3.AddArg(idx) 32522 v3.AddArg(mem) 32523 v2.AddArg(v3) 32524 v1.AddArg(v2) 32525 v0.AddArg(v1) 32526 v0.AddArg(y) 32527 return true 32528 } 32529 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 32530 // cond: i1 == i0+1 && 
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
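	// Editorial note (not generated text): the rest of this function, and the
	// _150/_160 variants below, are operand-order permutations of the rule
	// above. ORQ is commutative and MOVWloadidx1's two index arguments can
	// appear in either order, so the rule generator emits one matcher per
	// (p, idx) and per inner-ORQ operand order; all of them rewrite to the
	// same merged, byte-swapped wide load.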
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
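// Editorial illustration (an assumption, not part of the generated file): the
// MOVBloadidx1 rules above recognize two adjacent single-byte loads combined
// as a big-endian 16-bit read and merge them into one little-endian
// MOVWloadidx1 plus a byte-swapping ROLWconst [8]. A minimal sketch of the
// kind of source these rules target, with a hypothetical helper name:
func exampleLoad16BE(b []byte, i int) uint16 {
	// b[i] supplies the high byte and b[i+1] the low byte, i.e. a
	// big-endian 16-bit read built from two byte loads.
	return uint16(b[i+1]) | uint16(b[i])<<8
}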
func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0
		:= r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1,
			typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p !=
		x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
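// Editorial illustration (an assumption, not part of the generated file): the
// MOVWloadidx1 rules in the functions above fuse two byte-swapped 16-bit loads
// at offsets i0 and i0+2 into a single 32-bit load followed by BSWAPL, i.e. a
// big-endian 32-bit read. A hypothetical Go-level equivalent of the pattern:
func exampleLoad32BE(b []byte, i int) uint32 {
	// Four byte loads assembled most-significant-first; the rewrite rules
	// collapse the intermediate 16-bit halves into one MOVL plus BSWAPL.
	return uint32(b[i+3]) | uint32(b[i+2])<<8 | uint32(b[i+1])<<16 | uint32(b[i])<<24
}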
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
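// Editorial illustration (an assumption, not part of the generated file): the
// two MOVQload rules in ORQ_160 fold a 64-bit load that feeds an ORQ into the
// arithmetic instruction itself (ORQmem), and the ORQmem rule above forwards
// a value that was just stored via MOVSDstore through MOVQf2i instead of
// reloading it. A sketch of source that can benefit from the load folding:
func exampleOrFromMem(x uint64, p *uint64) uint64 {
	// Given the rules above, the load of *p may merge into a single
	// OR-from-memory instruction when the load has no other uses.
	return x | *p
}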
func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
	// match: (ROLB x (NEGQ y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
	// match: (ROLL x (NEGQ y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
	// match: (ROLQ x (NEGQ y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
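// Editorial illustration (an assumption, not part of the generated file): the
// ROL*/ROR* rules here turn a rotate by a negated count into the
// opposite-direction rotate, reduce rotates by constants to ROL*const with the
// count masked to the operand width, and collapse nested constant rotates.
// A hypothetical source pattern that, once recognized as a rotate elsewhere
// in this file, ends up as a single ROLLconst:
func exampleRotl32(x uint32, k uint) uint32 {
	// For a constant k this shift-or idiom becomes a rotate by a constant,
	// and the rules above fold the rotation count modulo 32.
	return x<<(k&31) | x>>((32-k)&31)
}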
func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
	// match: (ROLW x (NEGQ y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
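// For 8-bit (and, below, 16-bit) arithmetic right shifts the constant count
// is clamped rather than masked: shifting a byte right by 7 or more always
// yields pure sign bits, so any count c folds to min(c&31, 7) (respectively
// min(c&31, 15) for words).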
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int8(d)) >> uint64(c)
		return true
	}
	return false
}
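// The SARL/SARQ rules below strip operations on the shift count that cannot
// change the bits the hardware actually uses: adding a constant with
// c&31 == 0 (c&63 == 0 for quadwords) is a no-op modulo the width, and
// ANDing with a mask that keeps all of the low count bits (c&31 == 31,
// c&63 == 63) is likewise redundant. The same holds when the count is
// wrapped in a NEGQ/NEGL, which is preserved around the simplified operand.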
func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
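// A constant shift of a constant folds away entirely. Note the truncation to
// int32 before the shift: SARLconst operates on the low 32 bits, so the
// 64-bit aux constant is narrowed first and the signed result re-widened.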
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int32(d)) >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARQ x (MOVQconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
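// The 64-bit fold needs no truncation: SARQconst sees the full register
// width, so (SARQconst [c] (MOVQconst [d])) is simply d>>c.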
func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
	// match: (SARQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
	// match: (SARWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int16(d)) >> uint64(c)
		return true
	}
	return false
}
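// SBB{L,Q}carrymask subtracts a register from itself with borrow, yielding 0
// or -1 depending on the carry flag. When the flags argument is a statically
// known Flag* pseudo-op, the mask folds to the corresponding constant.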
func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
	// match: (SBBLcarrymask (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
	// match: (SBBQcarrymask (FlagEQ))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
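// Each SETcc op gets two kinds of rules: an InvertFlags argument (the
// comparison's operands were swapped) flips the condition, e.g. SETA becomes
// SETB, and a statically known flag value folds the whole setcc to a 0 or 1
// constant.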
func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
	// match: (SETA (InvertFlags x))
	// cond:
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
	// match: (SETAE (InvertFlags x))
	// cond:
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
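// The SET*mem variants are setcc ops fused with a byte store. The same folds
// apply, but a folded flag constant becomes an explicit
// (MOVBstore ptr (MOVLconst [0 or 1]) mem) rather than a bare constant.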
func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETBEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETBEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETAmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETBmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
	// match: (SETB (InvertFlags x))
	// cond:
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
	// match: (SETBE (InvertFlags x))
	// cond:
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETAEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETBmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETAmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETAmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
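// SETEQ of a TEST against a single bit is rewritten as a bit test: testing
// bit k of y via (TEST* (SHL* (MOV*const [1]) k) y), or against a
// power-of-two constant mask, becomes (BT* k y) or (BT*const [log2(c)] y),
// with SETEQ turning into SETAE (equal-to-zero means the tested bit, now in
// the carry flag, is clear). The rules are guarded by !config.nacl.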
func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAE (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
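// SETEQmem repeats the bit-test rewrites above with the byte store fused in;
// statically known flags again fold to a MOVBstore of the 0/1 constant.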
func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETEQmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETEQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
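// For the signed conditions the Flag* pseudo-ops encode both the signed and
// unsigned outcome, so e.g. FlagLT_ULT and FlagLT_UGT both mean "signed less
// than" and fold SETG to 0 regardless of the unsigned half of the encoding.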
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
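// Editor's note (illustrative): InvertFlags models a comparison whose
// operands were swapped, so each signed condition maps to its dual rather
// than its negation:
//
//	(SETG (InvertFlags x))  -> (SETL x)  // a > b  iff b < a
//	(SETGE (InvertFlags x)) -> (SETLE x) // a >= b iff b <= a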
func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETLEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETGmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETLmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
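// Editor's note (illustrative): the SETxxmem variants are a SETxx fused with
// a one-byte store, so once the flags are a known constant the whole value
// reduces to storing 0 or 1 directly:
//
//	(SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem)
//	-> (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)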
func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETB (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (InvertFlags x))
	// cond:
	// result: (SETNE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
	// match: (SETNE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
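// Editor's note (illustrative): for SETNE the bit-test rewrites above use
// SETB (CF == 1) rather than SETAE, since x&(1<<k) != 0 exactly when BT sets
// the carry flag. The (SHLL (MOVLconst [1]) x) pattern is a variable 1<<x
// mask, so the shift amount becomes the tested bit index:
//
//	(SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) -> (SETB (BTL x y))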
func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETNEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLL x (MOVQconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
	// match: (SHLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
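// Editor's note (illustrative): the SHLL rules above encode that AMD64
// variable shifts only read the low bits of the count (5 bits for 32-bit
// operations, 6 for 64-bit), so count arithmetic that cannot change those
// low bits can be dropped. A sketch of the 32-bit case in plain Go:
//
//	var x uint32 = 1
//	var c uint64 = 35
//	_ = x << (c & 31) // hardware computes x<<3; (SHLL x (ANDQconst [31] y)) -> (SHLL x y)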
func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLQ x (MOVQconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
	// match: (SHLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
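// Editor's note (illustrative): unlike SHRL/SHRQ, the 8-bit shift cannot
// simply mask its count: within the 5-bit count window, shifting a byte
// right by 8 or more always yields zero, hence the split at c&31 < 8 versus
// c&31 >= 8 in the rules above:
//
//	(SHRB x (MOVQconst [3]))  -> (SHRBconst [3] x)
//	(SHRB _ (MOVQconst [12])) -> (MOVLconst [0])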
func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
	// match: (SHRBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRL x (MOVQconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
	// match: (SHRLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRQ x (MOVQconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
	// match: (SHRQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
v.Args[1] 39911 x := v.Args[0] 39912 v_1 := v.Args[1] 39913 if v_1.Op != OpAMD64MOVQconst { 39914 break 39915 } 39916 c := v_1.AuxInt 39917 if !(c&31 < 16) { 39918 break 39919 } 39920 v.reset(OpAMD64SHRWconst) 39921 v.AuxInt = c & 31 39922 v.AddArg(x) 39923 return true 39924 } 39925 // match: (SHRW x (MOVLconst [c])) 39926 // cond: c&31 < 16 39927 // result: (SHRWconst [c&31] x) 39928 for { 39929 _ = v.Args[1] 39930 x := v.Args[0] 39931 v_1 := v.Args[1] 39932 if v_1.Op != OpAMD64MOVLconst { 39933 break 39934 } 39935 c := v_1.AuxInt 39936 if !(c&31 < 16) { 39937 break 39938 } 39939 v.reset(OpAMD64SHRWconst) 39940 v.AuxInt = c & 31 39941 v.AddArg(x) 39942 return true 39943 } 39944 // match: (SHRW _ (MOVQconst [c])) 39945 // cond: c&31 >= 16 39946 // result: (MOVLconst [0]) 39947 for { 39948 _ = v.Args[1] 39949 v_1 := v.Args[1] 39950 if v_1.Op != OpAMD64MOVQconst { 39951 break 39952 } 39953 c := v_1.AuxInt 39954 if !(c&31 >= 16) { 39955 break 39956 } 39957 v.reset(OpAMD64MOVLconst) 39958 v.AuxInt = 0 39959 return true 39960 } 39961 // match: (SHRW _ (MOVLconst [c])) 39962 // cond: c&31 >= 16 39963 // result: (MOVLconst [0]) 39964 for { 39965 _ = v.Args[1] 39966 v_1 := v.Args[1] 39967 if v_1.Op != OpAMD64MOVLconst { 39968 break 39969 } 39970 c := v_1.AuxInt 39971 if !(c&31 >= 16) { 39972 break 39973 } 39974 v.reset(OpAMD64MOVLconst) 39975 v.AuxInt = 0 39976 return true 39977 } 39978 return false 39979 } 39980 func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { 39981 // match: (SHRWconst x [0]) 39982 // cond: 39983 // result: x 39984 for { 39985 if v.AuxInt != 0 { 39986 break 39987 } 39988 x := v.Args[0] 39989 v.reset(OpCopy) 39990 v.Type = x.Type 39991 v.AddArg(x) 39992 return true 39993 } 39994 return false 39995 } 39996 func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { 39997 b := v.Block 39998 _ = b 39999 // match: (SUBL x (MOVLconst [c])) 40000 // cond: 40001 // result: (SUBLconst x [c]) 40002 for { 40003 _ = v.Args[1] 40004 x := v.Args[0] 40005 v_1 := v.Args[1] 40006 if v_1.Op != OpAMD64MOVLconst { 40007 break 40008 } 40009 c := v_1.AuxInt 40010 v.reset(OpAMD64SUBLconst) 40011 v.AuxInt = c 40012 v.AddArg(x) 40013 return true 40014 } 40015 // match: (SUBL (MOVLconst [c]) x) 40016 // cond: 40017 // result: (NEGL (SUBLconst <v.Type> x [c])) 40018 for { 40019 _ = v.Args[1] 40020 v_0 := v.Args[0] 40021 if v_0.Op != OpAMD64MOVLconst { 40022 break 40023 } 40024 c := v_0.AuxInt 40025 x := v.Args[1] 40026 v.reset(OpAMD64NEGL) 40027 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 40028 v0.AuxInt = c 40029 v0.AddArg(x) 40030 v.AddArg(v0) 40031 return true 40032 } 40033 // match: (SUBL x x) 40034 // cond: 40035 // result: (MOVLconst [0]) 40036 for { 40037 _ = v.Args[1] 40038 x := v.Args[0] 40039 if x != v.Args[1] { 40040 break 40041 } 40042 v.reset(OpAMD64MOVLconst) 40043 v.AuxInt = 0 40044 return true 40045 } 40046 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) 40047 // cond: canMergeLoad(v, l, x) && clobber(l) 40048 // result: (SUBLmem x [off] {sym} ptr mem) 40049 for { 40050 _ = v.Args[1] 40051 x := v.Args[0] 40052 l := v.Args[1] 40053 if l.Op != OpAMD64MOVLload { 40054 break 40055 } 40056 off := l.AuxInt 40057 sym := l.Aux 40058 _ = l.Args[1] 40059 ptr := l.Args[0] 40060 mem := l.Args[1] 40061 if !(canMergeLoad(v, l, x) && clobber(l)) { 40062 break 40063 } 40064 v.reset(OpAMD64SUBLmem) 40065 v.AuxInt = off 40066 v.Aux = sym 40067 v.AddArg(x) 40068 v.AddArg(ptr) 40069 v.AddArg(mem) 40070 return true 40071 } 40072 return false 40073 } 40074 func 
rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { 40075 // match: (SUBLconst [c] x) 40076 // cond: int32(c) == 0 40077 // result: x 40078 for { 40079 c := v.AuxInt 40080 x := v.Args[0] 40081 if !(int32(c) == 0) { 40082 break 40083 } 40084 v.reset(OpCopy) 40085 v.Type = x.Type 40086 v.AddArg(x) 40087 return true 40088 } 40089 // match: (SUBLconst [c] x) 40090 // cond: 40091 // result: (ADDLconst [int64(int32(-c))] x) 40092 for { 40093 c := v.AuxInt 40094 x := v.Args[0] 40095 v.reset(OpAMD64ADDLconst) 40096 v.AuxInt = int64(int32(-c)) 40097 v.AddArg(x) 40098 return true 40099 } 40100 } 40101 func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool { 40102 b := v.Block 40103 _ = b 40104 typ := &b.Func.Config.Types 40105 _ = typ 40106 // match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 40107 // cond: 40108 // result: (SUBL x (MOVLf2i y)) 40109 for { 40110 off := v.AuxInt 40111 sym := v.Aux 40112 _ = v.Args[2] 40113 x := v.Args[0] 40114 ptr := v.Args[1] 40115 v_2 := v.Args[2] 40116 if v_2.Op != OpAMD64MOVSSstore { 40117 break 40118 } 40119 if v_2.AuxInt != off { 40120 break 40121 } 40122 if v_2.Aux != sym { 40123 break 40124 } 40125 _ = v_2.Args[2] 40126 if ptr != v_2.Args[0] { 40127 break 40128 } 40129 y := v_2.Args[1] 40130 v.reset(OpAMD64SUBL) 40131 v.AddArg(x) 40132 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 40133 v0.AddArg(y) 40134 v.AddArg(v0) 40135 return true 40136 } 40137 return false 40138 } 40139 func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { 40140 b := v.Block 40141 _ = b 40142 // match: (SUBQ x (MOVQconst [c])) 40143 // cond: is32Bit(c) 40144 // result: (SUBQconst x [c]) 40145 for { 40146 _ = v.Args[1] 40147 x := v.Args[0] 40148 v_1 := v.Args[1] 40149 if v_1.Op != OpAMD64MOVQconst { 40150 break 40151 } 40152 c := v_1.AuxInt 40153 if !(is32Bit(c)) { 40154 break 40155 } 40156 v.reset(OpAMD64SUBQconst) 40157 v.AuxInt = c 40158 v.AddArg(x) 40159 return true 40160 } 40161 // match: (SUBQ (MOVQconst [c]) x) 40162 // cond: is32Bit(c) 40163 // result: (NEGQ (SUBQconst <v.Type> x [c])) 40164 for { 40165 _ = v.Args[1] 40166 v_0 := v.Args[0] 40167 if v_0.Op != OpAMD64MOVQconst { 40168 break 40169 } 40170 c := v_0.AuxInt 40171 x := v.Args[1] 40172 if !(is32Bit(c)) { 40173 break 40174 } 40175 v.reset(OpAMD64NEGQ) 40176 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 40177 v0.AuxInt = c 40178 v0.AddArg(x) 40179 v.AddArg(v0) 40180 return true 40181 } 40182 // match: (SUBQ x x) 40183 // cond: 40184 // result: (MOVQconst [0]) 40185 for { 40186 _ = v.Args[1] 40187 x := v.Args[0] 40188 if x != v.Args[1] { 40189 break 40190 } 40191 v.reset(OpAMD64MOVQconst) 40192 v.AuxInt = 0 40193 return true 40194 } 40195 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) 40196 // cond: canMergeLoad(v, l, x) && clobber(l) 40197 // result: (SUBQmem x [off] {sym} ptr mem) 40198 for { 40199 _ = v.Args[1] 40200 x := v.Args[0] 40201 l := v.Args[1] 40202 if l.Op != OpAMD64MOVQload { 40203 break 40204 } 40205 off := l.AuxInt 40206 sym := l.Aux 40207 _ = l.Args[1] 40208 ptr := l.Args[0] 40209 mem := l.Args[1] 40210 if !(canMergeLoad(v, l, x) && clobber(l)) { 40211 break 40212 } 40213 v.reset(OpAMD64SUBQmem) 40214 v.AuxInt = off 40215 v.Aux = sym 40216 v.AddArg(x) 40217 v.AddArg(ptr) 40218 v.AddArg(mem) 40219 return true 40220 } 40221 return false 40222 } 40223 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { 40224 // match: (SUBQconst [0] x) 40225 // cond: 40226 // result: x 40227 for { 40228 if v.AuxInt != 0 { 40229 break 40230 } 40231 x := v.Args[0] 40232 v.reset(OpCopy) 
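// When a rule's result is an existing value (here the operand x itself),
// the generated code rewrites v into an OpCopy of that value and copies
// its type, rather than allocating a new Value.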
40233 v.Type = x.Type 40234 v.AddArg(x) 40235 return true 40236 } 40237 // match: (SUBQconst [c] x) 40238 // cond: c != -(1<<31) 40239 // result: (ADDQconst [-c] x) 40240 for { 40241 c := v.AuxInt 40242 x := v.Args[0] 40243 if !(c != -(1 << 31)) { 40244 break 40245 } 40246 v.reset(OpAMD64ADDQconst) 40247 v.AuxInt = -c 40248 v.AddArg(x) 40249 return true 40250 } 40251 // match: (SUBQconst (MOVQconst [d]) [c]) 40252 // cond: 40253 // result: (MOVQconst [d-c]) 40254 for { 40255 c := v.AuxInt 40256 v_0 := v.Args[0] 40257 if v_0.Op != OpAMD64MOVQconst { 40258 break 40259 } 40260 d := v_0.AuxInt 40261 v.reset(OpAMD64MOVQconst) 40262 v.AuxInt = d - c 40263 return true 40264 } 40265 // match: (SUBQconst (SUBQconst x [d]) [c]) 40266 // cond: is32Bit(-c-d) 40267 // result: (ADDQconst [-c-d] x) 40268 for { 40269 c := v.AuxInt 40270 v_0 := v.Args[0] 40271 if v_0.Op != OpAMD64SUBQconst { 40272 break 40273 } 40274 d := v_0.AuxInt 40275 x := v_0.Args[0] 40276 if !(is32Bit(-c - d)) { 40277 break 40278 } 40279 v.reset(OpAMD64ADDQconst) 40280 v.AuxInt = -c - d 40281 v.AddArg(x) 40282 return true 40283 } 40284 return false 40285 } 40286 func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool { 40287 b := v.Block 40288 _ = b 40289 typ := &b.Func.Config.Types 40290 _ = typ 40291 // match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 40292 // cond: 40293 // result: (SUBQ x (MOVQf2i y)) 40294 for { 40295 off := v.AuxInt 40296 sym := v.Aux 40297 _ = v.Args[2] 40298 x := v.Args[0] 40299 ptr := v.Args[1] 40300 v_2 := v.Args[2] 40301 if v_2.Op != OpAMD64MOVSDstore { 40302 break 40303 } 40304 if v_2.AuxInt != off { 40305 break 40306 } 40307 if v_2.Aux != sym { 40308 break 40309 } 40310 _ = v_2.Args[2] 40311 if ptr != v_2.Args[0] { 40312 break 40313 } 40314 y := v_2.Args[1] 40315 v.reset(OpAMD64SUBQ) 40316 v.AddArg(x) 40317 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 40318 v0.AddArg(y) 40319 v.AddArg(v0) 40320 return true 40321 } 40322 return false 40323 } 40324 func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { 40325 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) 40326 // cond: canMergeLoad(v, l, x) && clobber(l) 40327 // result: (SUBSDmem x [off] {sym} ptr mem) 40328 for { 40329 _ = v.Args[1] 40330 x := v.Args[0] 40331 l := v.Args[1] 40332 if l.Op != OpAMD64MOVSDload { 40333 break 40334 } 40335 off := l.AuxInt 40336 sym := l.Aux 40337 _ = l.Args[1] 40338 ptr := l.Args[0] 40339 mem := l.Args[1] 40340 if !(canMergeLoad(v, l, x) && clobber(l)) { 40341 break 40342 } 40343 v.reset(OpAMD64SUBSDmem) 40344 v.AuxInt = off 40345 v.Aux = sym 40346 v.AddArg(x) 40347 v.AddArg(ptr) 40348 v.AddArg(mem) 40349 return true 40350 } 40351 return false 40352 } 40353 func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool { 40354 b := v.Block 40355 _ = b 40356 typ := &b.Func.Config.Types 40357 _ = typ 40358 // match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 40359 // cond: 40360 // result: (SUBSD x (MOVQi2f y)) 40361 for { 40362 off := v.AuxInt 40363 sym := v.Aux 40364 _ = v.Args[2] 40365 x := v.Args[0] 40366 ptr := v.Args[1] 40367 v_2 := v.Args[2] 40368 if v_2.Op != OpAMD64MOVQstore { 40369 break 40370 } 40371 if v_2.AuxInt != off { 40372 break 40373 } 40374 if v_2.Aux != sym { 40375 break 40376 } 40377 _ = v_2.Args[2] 40378 if ptr != v_2.Args[0] { 40379 break 40380 } 40381 y := v_2.Args[1] 40382 v.reset(OpAMD64SUBSD) 40383 v.AddArg(x) 40384 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 40385 v0.AddArg(y) 40386 v.AddArg(v0) 40387 return true 40388 } 40389 return false 40390 } 
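// The SUBSS/SUBSSmem pair below mirrors the SUBSD/SUBSDmem pair above:
// the first rule folds a float32 load into the subtraction when
// canMergeLoad permits, and SUBSSmem is unfolded again when the loaded
// value was just stored from an integer register (MOVLstore), where a
// direct MOVLi2f register move avoids the store-load round trip.
//
// A minimal sketch of Go source that would typically trigger the
// load-folding rule (function and names are illustrative only):
//
//	func sub(a float32, p *float32) float32 {
//		return a - *p // candidate for SUBSSmem: SUBSS with a memory operand
//	}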
40391 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { 40392 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) 40393 // cond: canMergeLoad(v, l, x) && clobber(l) 40394 // result: (SUBSSmem x [off] {sym} ptr mem) 40395 for { 40396 _ = v.Args[1] 40397 x := v.Args[0] 40398 l := v.Args[1] 40399 if l.Op != OpAMD64MOVSSload { 40400 break 40401 } 40402 off := l.AuxInt 40403 sym := l.Aux 40404 _ = l.Args[1] 40405 ptr := l.Args[0] 40406 mem := l.Args[1] 40407 if !(canMergeLoad(v, l, x) && clobber(l)) { 40408 break 40409 } 40410 v.reset(OpAMD64SUBSSmem) 40411 v.AuxInt = off 40412 v.Aux = sym 40413 v.AddArg(x) 40414 v.AddArg(ptr) 40415 v.AddArg(mem) 40416 return true 40417 } 40418 return false 40419 } 40420 func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool { 40421 b := v.Block 40422 _ = b 40423 typ := &b.Func.Config.Types 40424 _ = typ 40425 // match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 40426 // cond: 40427 // result: (SUBSS x (MOVLi2f y)) 40428 for { 40429 off := v.AuxInt 40430 sym := v.Aux 40431 _ = v.Args[2] 40432 x := v.Args[0] 40433 ptr := v.Args[1] 40434 v_2 := v.Args[2] 40435 if v_2.Op != OpAMD64MOVLstore { 40436 break 40437 } 40438 if v_2.AuxInt != off { 40439 break 40440 } 40441 if v_2.Aux != sym { 40442 break 40443 } 40444 _ = v_2.Args[2] 40445 if ptr != v_2.Args[0] { 40446 break 40447 } 40448 y := v_2.Args[1] 40449 v.reset(OpAMD64SUBSS) 40450 v.AddArg(x) 40451 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 40452 v0.AddArg(y) 40453 v.AddArg(v0) 40454 return true 40455 } 40456 return false 40457 } 40458 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { 40459 // match: (TESTB (MOVLconst [c]) x) 40460 // cond: 40461 // result: (TESTBconst [c] x) 40462 for { 40463 _ = v.Args[1] 40464 v_0 := v.Args[0] 40465 if v_0.Op != OpAMD64MOVLconst { 40466 break 40467 } 40468 c := v_0.AuxInt 40469 x := v.Args[1] 40470 v.reset(OpAMD64TESTBconst) 40471 v.AuxInt = c 40472 v.AddArg(x) 40473 return true 40474 } 40475 // match: (TESTB x (MOVLconst [c])) 40476 // cond: 40477 // result: (TESTBconst [c] x) 40478 for { 40479 _ = v.Args[1] 40480 x := v.Args[0] 40481 v_1 := v.Args[1] 40482 if v_1.Op != OpAMD64MOVLconst { 40483 break 40484 } 40485 c := v_1.AuxInt 40486 v.reset(OpAMD64TESTBconst) 40487 v.AuxInt = c 40488 v.AddArg(x) 40489 return true 40490 } 40491 return false 40492 } 40493 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { 40494 // match: (TESTL (MOVLconst [c]) x) 40495 // cond: 40496 // result: (TESTLconst [c] x) 40497 for { 40498 _ = v.Args[1] 40499 v_0 := v.Args[0] 40500 if v_0.Op != OpAMD64MOVLconst { 40501 break 40502 } 40503 c := v_0.AuxInt 40504 x := v.Args[1] 40505 v.reset(OpAMD64TESTLconst) 40506 v.AuxInt = c 40507 v.AddArg(x) 40508 return true 40509 } 40510 // match: (TESTL x (MOVLconst [c])) 40511 // cond: 40512 // result: (TESTLconst [c] x) 40513 for { 40514 _ = v.Args[1] 40515 x := v.Args[0] 40516 v_1 := v.Args[1] 40517 if v_1.Op != OpAMD64MOVLconst { 40518 break 40519 } 40520 c := v_1.AuxInt 40521 v.reset(OpAMD64TESTLconst) 40522 v.AuxInt = c 40523 v.AddArg(x) 40524 return true 40525 } 40526 return false 40527 } 40528 func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { 40529 // match: (TESTQ (MOVQconst [c]) x) 40530 // cond: is32Bit(c) 40531 // result: (TESTQconst [c] x) 40532 for { 40533 _ = v.Args[1] 40534 v_0 := v.Args[0] 40535 if v_0.Op != OpAMD64MOVQconst { 40536 break 40537 } 40538 c := v_0.AuxInt 40539 x := v.Args[1] 40540 if !(is32Bit(c)) { 40541 break 40542 } 40543 v.reset(OpAMD64TESTQconst) 40544 v.AuxInt = c 40545 
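// The is32Bit check above guarantees that c fits in the sign-extended
// 32-bit immediate that TESTQconst encodes via AuxInt.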
v.AddArg(x) 40546 return true 40547 } 40548 // match: (TESTQ x (MOVQconst [c])) 40549 // cond: is32Bit(c) 40550 // result: (TESTQconst [c] x) 40551 for { 40552 _ = v.Args[1] 40553 x := v.Args[0] 40554 v_1 := v.Args[1] 40555 if v_1.Op != OpAMD64MOVQconst { 40556 break 40557 } 40558 c := v_1.AuxInt 40559 if !(is32Bit(c)) { 40560 break 40561 } 40562 v.reset(OpAMD64TESTQconst) 40563 v.AuxInt = c 40564 v.AddArg(x) 40565 return true 40566 } 40567 return false 40568 } 40569 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { 40570 // match: (TESTW (MOVLconst [c]) x) 40571 // cond: 40572 // result: (TESTWconst [c] x) 40573 for { 40574 _ = v.Args[1] 40575 v_0 := v.Args[0] 40576 if v_0.Op != OpAMD64MOVLconst { 40577 break 40578 } 40579 c := v_0.AuxInt 40580 x := v.Args[1] 40581 v.reset(OpAMD64TESTWconst) 40582 v.AuxInt = c 40583 v.AddArg(x) 40584 return true 40585 } 40586 // match: (TESTW x (MOVLconst [c])) 40587 // cond: 40588 // result: (TESTWconst [c] x) 40589 for { 40590 _ = v.Args[1] 40591 x := v.Args[0] 40592 v_1 := v.Args[1] 40593 if v_1.Op != OpAMD64MOVLconst { 40594 break 40595 } 40596 c := v_1.AuxInt 40597 v.reset(OpAMD64TESTWconst) 40598 v.AuxInt = c 40599 v.AddArg(x) 40600 return true 40601 } 40602 return false 40603 } 40604 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { 40605 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 40606 // cond: is32Bit(off1+off2) 40607 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 40608 for { 40609 off1 := v.AuxInt 40610 sym := v.Aux 40611 _ = v.Args[2] 40612 val := v.Args[0] 40613 v_1 := v.Args[1] 40614 if v_1.Op != OpAMD64ADDQconst { 40615 break 40616 } 40617 off2 := v_1.AuxInt 40618 ptr := v_1.Args[0] 40619 mem := v.Args[2] 40620 if !(is32Bit(off1 + off2)) { 40621 break 40622 } 40623 v.reset(OpAMD64XADDLlock) 40624 v.AuxInt = off1 + off2 40625 v.Aux = sym 40626 v.AddArg(val) 40627 v.AddArg(ptr) 40628 v.AddArg(mem) 40629 return true 40630 } 40631 return false 40632 } 40633 func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { 40634 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 40635 // cond: is32Bit(off1+off2) 40636 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 40637 for { 40638 off1 := v.AuxInt 40639 sym := v.Aux 40640 _ = v.Args[2] 40641 val := v.Args[0] 40642 v_1 := v.Args[1] 40643 if v_1.Op != OpAMD64ADDQconst { 40644 break 40645 } 40646 off2 := v_1.AuxInt 40647 ptr := v_1.Args[0] 40648 mem := v.Args[2] 40649 if !(is32Bit(off1 + off2)) { 40650 break 40651 } 40652 v.reset(OpAMD64XADDQlock) 40653 v.AuxInt = off1 + off2 40654 v.Aux = sym 40655 v.AddArg(val) 40656 v.AddArg(ptr) 40657 v.AddArg(mem) 40658 return true 40659 } 40660 return false 40661 } 40662 func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { 40663 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 40664 // cond: is32Bit(off1+off2) 40665 // result: (XCHGL [off1+off2] {sym} val ptr mem) 40666 for { 40667 off1 := v.AuxInt 40668 sym := v.Aux 40669 _ = v.Args[2] 40670 val := v.Args[0] 40671 v_1 := v.Args[1] 40672 if v_1.Op != OpAMD64ADDQconst { 40673 break 40674 } 40675 off2 := v_1.AuxInt 40676 ptr := v_1.Args[0] 40677 mem := v.Args[2] 40678 if !(is32Bit(off1 + off2)) { 40679 break 40680 } 40681 v.reset(OpAMD64XCHGL) 40682 v.AuxInt = off1 + off2 40683 v.Aux = sym 40684 v.AddArg(val) 40685 v.AddArg(ptr) 40686 v.AddArg(mem) 40687 return true 40688 } 40689 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 40690 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 40691 // result: 
(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 40692 for { 40693 off1 := v.AuxInt 40694 sym1 := v.Aux 40695 _ = v.Args[2] 40696 val := v.Args[0] 40697 v_1 := v.Args[1] 40698 if v_1.Op != OpAMD64LEAQ { 40699 break 40700 } 40701 off2 := v_1.AuxInt 40702 sym2 := v_1.Aux 40703 ptr := v_1.Args[0] 40704 mem := v.Args[2] 40705 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 40706 break 40707 } 40708 v.reset(OpAMD64XCHGL) 40709 v.AuxInt = off1 + off2 40710 v.Aux = mergeSym(sym1, sym2) 40711 v.AddArg(val) 40712 v.AddArg(ptr) 40713 v.AddArg(mem) 40714 return true 40715 } 40716 return false 40717 } 40718 func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { 40719 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 40720 // cond: is32Bit(off1+off2) 40721 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 40722 for { 40723 off1 := v.AuxInt 40724 sym := v.Aux 40725 _ = v.Args[2] 40726 val := v.Args[0] 40727 v_1 := v.Args[1] 40728 if v_1.Op != OpAMD64ADDQconst { 40729 break 40730 } 40731 off2 := v_1.AuxInt 40732 ptr := v_1.Args[0] 40733 mem := v.Args[2] 40734 if !(is32Bit(off1 + off2)) { 40735 break 40736 } 40737 v.reset(OpAMD64XCHGQ) 40738 v.AuxInt = off1 + off2 40739 v.Aux = sym 40740 v.AddArg(val) 40741 v.AddArg(ptr) 40742 v.AddArg(mem) 40743 return true 40744 } 40745 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 40746 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 40747 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 40748 for { 40749 off1 := v.AuxInt 40750 sym1 := v.Aux 40751 _ = v.Args[2] 40752 val := v.Args[0] 40753 v_1 := v.Args[1] 40754 if v_1.Op != OpAMD64LEAQ { 40755 break 40756 } 40757 off2 := v_1.AuxInt 40758 sym2 := v_1.Aux 40759 ptr := v_1.Args[0] 40760 mem := v.Args[2] 40761 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 40762 break 40763 } 40764 v.reset(OpAMD64XCHGQ) 40765 v.AuxInt = off1 + off2 40766 v.Aux = mergeSym(sym1, sym2) 40767 v.AddArg(val) 40768 v.AddArg(ptr) 40769 v.AddArg(mem) 40770 return true 40771 } 40772 return false 40773 } 40774 func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { 40775 // match: (XORL x (MOVLconst [c])) 40776 // cond: 40777 // result: (XORLconst [c] x) 40778 for { 40779 _ = v.Args[1] 40780 x := v.Args[0] 40781 v_1 := v.Args[1] 40782 if v_1.Op != OpAMD64MOVLconst { 40783 break 40784 } 40785 c := v_1.AuxInt 40786 v.reset(OpAMD64XORLconst) 40787 v.AuxInt = c 40788 v.AddArg(x) 40789 return true 40790 } 40791 // match: (XORL (MOVLconst [c]) x) 40792 // cond: 40793 // result: (XORLconst [c] x) 40794 for { 40795 _ = v.Args[1] 40796 v_0 := v.Args[0] 40797 if v_0.Op != OpAMD64MOVLconst { 40798 break 40799 } 40800 c := v_0.AuxInt 40801 x := v.Args[1] 40802 v.reset(OpAMD64XORLconst) 40803 v.AuxInt = c 40804 v.AddArg(x) 40805 return true 40806 } 40807 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) 40808 // cond: d==32-c 40809 // result: (ROLLconst x [c]) 40810 for { 40811 _ = v.Args[1] 40812 v_0 := v.Args[0] 40813 if v_0.Op != OpAMD64SHLLconst { 40814 break 40815 } 40816 c := v_0.AuxInt 40817 x := v_0.Args[0] 40818 v_1 := v.Args[1] 40819 if v_1.Op != OpAMD64SHRLconst { 40820 break 40821 } 40822 d := v_1.AuxInt 40823 if x != v_1.Args[0] { 40824 break 40825 } 40826 if !(d == 32-c) { 40827 break 40828 } 40829 v.reset(OpAMD64ROLLconst) 40830 v.AuxInt = c 40831 v.AddArg(x) 40832 return true 40833 } 40834 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) 40835 // cond: d==32-c 40836 // result: (ROLLconst x [c]) 40837 for { 40838 _ = 
v.Args[1] 40839 v_0 := v.Args[0] 40840 if v_0.Op != OpAMD64SHRLconst { 40841 break 40842 } 40843 d := v_0.AuxInt 40844 x := v_0.Args[0] 40845 v_1 := v.Args[1] 40846 if v_1.Op != OpAMD64SHLLconst { 40847 break 40848 } 40849 c := v_1.AuxInt 40850 if x != v_1.Args[0] { 40851 break 40852 } 40853 if !(d == 32-c) { 40854 break 40855 } 40856 v.reset(OpAMD64ROLLconst) 40857 v.AuxInt = c 40858 v.AddArg(x) 40859 return true 40860 } 40861 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 40862 // cond: d==16-c && c < 16 && t.Size() == 2 40863 // result: (ROLWconst x [c]) 40864 for { 40865 t := v.Type 40866 _ = v.Args[1] 40867 v_0 := v.Args[0] 40868 if v_0.Op != OpAMD64SHLLconst { 40869 break 40870 } 40871 c := v_0.AuxInt 40872 x := v_0.Args[0] 40873 v_1 := v.Args[1] 40874 if v_1.Op != OpAMD64SHRWconst { 40875 break 40876 } 40877 d := v_1.AuxInt 40878 if x != v_1.Args[0] { 40879 break 40880 } 40881 if !(d == 16-c && c < 16 && t.Size() == 2) { 40882 break 40883 } 40884 v.reset(OpAMD64ROLWconst) 40885 v.AuxInt = c 40886 v.AddArg(x) 40887 return true 40888 } 40889 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 40890 // cond: d==16-c && c < 16 && t.Size() == 2 40891 // result: (ROLWconst x [c]) 40892 for { 40893 t := v.Type 40894 _ = v.Args[1] 40895 v_0 := v.Args[0] 40896 if v_0.Op != OpAMD64SHRWconst { 40897 break 40898 } 40899 d := v_0.AuxInt 40900 x := v_0.Args[0] 40901 v_1 := v.Args[1] 40902 if v_1.Op != OpAMD64SHLLconst { 40903 break 40904 } 40905 c := v_1.AuxInt 40906 if x != v_1.Args[0] { 40907 break 40908 } 40909 if !(d == 16-c && c < 16 && t.Size() == 2) { 40910 break 40911 } 40912 v.reset(OpAMD64ROLWconst) 40913 v.AuxInt = c 40914 v.AddArg(x) 40915 return true 40916 } 40917 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 40918 // cond: d==8-c && c < 8 && t.Size() == 1 40919 // result: (ROLBconst x [c]) 40920 for { 40921 t := v.Type 40922 _ = v.Args[1] 40923 v_0 := v.Args[0] 40924 if v_0.Op != OpAMD64SHLLconst { 40925 break 40926 } 40927 c := v_0.AuxInt 40928 x := v_0.Args[0] 40929 v_1 := v.Args[1] 40930 if v_1.Op != OpAMD64SHRBconst { 40931 break 40932 } 40933 d := v_1.AuxInt 40934 if x != v_1.Args[0] { 40935 break 40936 } 40937 if !(d == 8-c && c < 8 && t.Size() == 1) { 40938 break 40939 } 40940 v.reset(OpAMD64ROLBconst) 40941 v.AuxInt = c 40942 v.AddArg(x) 40943 return true 40944 } 40945 // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 40946 // cond: d==8-c && c < 8 && t.Size() == 1 40947 // result: (ROLBconst x [c]) 40948 for { 40949 t := v.Type 40950 _ = v.Args[1] 40951 v_0 := v.Args[0] 40952 if v_0.Op != OpAMD64SHRBconst { 40953 break 40954 } 40955 d := v_0.AuxInt 40956 x := v_0.Args[0] 40957 v_1 := v.Args[1] 40958 if v_1.Op != OpAMD64SHLLconst { 40959 break 40960 } 40961 c := v_1.AuxInt 40962 if x != v_1.Args[0] { 40963 break 40964 } 40965 if !(d == 8-c && c < 8 && t.Size() == 1) { 40966 break 40967 } 40968 v.reset(OpAMD64ROLBconst) 40969 v.AuxInt = c 40970 v.AddArg(x) 40971 return true 40972 } 40973 // match: (XORL x x) 40974 // cond: 40975 // result: (MOVLconst [0]) 40976 for { 40977 _ = v.Args[1] 40978 x := v.Args[0] 40979 if x != v.Args[1] { 40980 break 40981 } 40982 v.reset(OpAMD64MOVLconst) 40983 v.AuxInt = 0 40984 return true 40985 } 40986 // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) 40987 // cond: canMergeLoad(v, l, x) && clobber(l) 40988 // result: (XORLmem x [off] {sym} ptr mem) 40989 for { 40990 _ = v.Args[1] 40991 x := v.Args[0] 40992 l := v.Args[1] 40993 if l.Op != OpAMD64MOVLload { 40994 break 40995 } 40996 off := l.AuxInt 40997 sym := 
l.Aux 40998 _ = l.Args[1] 40999 ptr := l.Args[0] 41000 mem := l.Args[1] 41001 if !(canMergeLoad(v, l, x) && clobber(l)) { 41002 break 41003 } 41004 v.reset(OpAMD64XORLmem) 41005 v.AuxInt = off 41006 v.Aux = sym 41007 v.AddArg(x) 41008 v.AddArg(ptr) 41009 v.AddArg(mem) 41010 return true 41011 } 41012 return false 41013 } 41014 func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { 41015 // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) 41016 // cond: canMergeLoad(v, l, x) && clobber(l) 41017 // result: (XORLmem x [off] {sym} ptr mem) 41018 for { 41019 _ = v.Args[1] 41020 l := v.Args[0] 41021 if l.Op != OpAMD64MOVLload { 41022 break 41023 } 41024 off := l.AuxInt 41025 sym := l.Aux 41026 _ = l.Args[1] 41027 ptr := l.Args[0] 41028 mem := l.Args[1] 41029 x := v.Args[1] 41030 if !(canMergeLoad(v, l, x) && clobber(l)) { 41031 break 41032 } 41033 v.reset(OpAMD64XORLmem) 41034 v.AuxInt = off 41035 v.Aux = sym 41036 v.AddArg(x) 41037 v.AddArg(ptr) 41038 v.AddArg(mem) 41039 return true 41040 } 41041 return false 41042 } 41043 func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { 41044 // match: (XORLconst [1] (SETNE x)) 41045 // cond: 41046 // result: (SETEQ x) 41047 for { 41048 if v.AuxInt != 1 { 41049 break 41050 } 41051 v_0 := v.Args[0] 41052 if v_0.Op != OpAMD64SETNE { 41053 break 41054 } 41055 x := v_0.Args[0] 41056 v.reset(OpAMD64SETEQ) 41057 v.AddArg(x) 41058 return true 41059 } 41060 // match: (XORLconst [1] (SETEQ x)) 41061 // cond: 41062 // result: (SETNE x) 41063 for { 41064 if v.AuxInt != 1 { 41065 break 41066 } 41067 v_0 := v.Args[0] 41068 if v_0.Op != OpAMD64SETEQ { 41069 break 41070 } 41071 x := v_0.Args[0] 41072 v.reset(OpAMD64SETNE) 41073 v.AddArg(x) 41074 return true 41075 } 41076 // match: (XORLconst [1] (SETL x)) 41077 // cond: 41078 // result: (SETGE x) 41079 for { 41080 if v.AuxInt != 1 { 41081 break 41082 } 41083 v_0 := v.Args[0] 41084 if v_0.Op != OpAMD64SETL { 41085 break 41086 } 41087 x := v_0.Args[0] 41088 v.reset(OpAMD64SETGE) 41089 v.AddArg(x) 41090 return true 41091 } 41092 // match: (XORLconst [1] (SETGE x)) 41093 // cond: 41094 // result: (SETL x) 41095 for { 41096 if v.AuxInt != 1 { 41097 break 41098 } 41099 v_0 := v.Args[0] 41100 if v_0.Op != OpAMD64SETGE { 41101 break 41102 } 41103 x := v_0.Args[0] 41104 v.reset(OpAMD64SETL) 41105 v.AddArg(x) 41106 return true 41107 } 41108 // match: (XORLconst [1] (SETLE x)) 41109 // cond: 41110 // result: (SETG x) 41111 for { 41112 if v.AuxInt != 1 { 41113 break 41114 } 41115 v_0 := v.Args[0] 41116 if v_0.Op != OpAMD64SETLE { 41117 break 41118 } 41119 x := v_0.Args[0] 41120 v.reset(OpAMD64SETG) 41121 v.AddArg(x) 41122 return true 41123 } 41124 // match: (XORLconst [1] (SETG x)) 41125 // cond: 41126 // result: (SETLE x) 41127 for { 41128 if v.AuxInt != 1 { 41129 break 41130 } 41131 v_0 := v.Args[0] 41132 if v_0.Op != OpAMD64SETG { 41133 break 41134 } 41135 x := v_0.Args[0] 41136 v.reset(OpAMD64SETLE) 41137 v.AddArg(x) 41138 return true 41139 } 41140 // match: (XORLconst [1] (SETB x)) 41141 // cond: 41142 // result: (SETAE x) 41143 for { 41144 if v.AuxInt != 1 { 41145 break 41146 } 41147 v_0 := v.Args[0] 41148 if v_0.Op != OpAMD64SETB { 41149 break 41150 } 41151 x := v_0.Args[0] 41152 v.reset(OpAMD64SETAE) 41153 v.AddArg(x) 41154 return true 41155 } 41156 // match: (XORLconst [1] (SETAE x)) 41157 // cond: 41158 // result: (SETB x) 41159 for { 41160 if v.AuxInt != 1 { 41161 break 41162 } 41163 v_0 := v.Args[0] 41164 if v_0.Op != OpAMD64SETAE { 41165 break 41166 } 41167 x := v_0.Args[0] 41168 v.reset(OpAMD64SETB) 41169 
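// Flipping the low bit of a boolean SETcc result negates its condition,
// so XORLconst [1] maps each SETcc to its complement (here SETAE <-> SETB).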
v.AddArg(x) 41170 return true 41171 } 41172 // match: (XORLconst [1] (SETBE x)) 41173 // cond: 41174 // result: (SETA x) 41175 for { 41176 if v.AuxInt != 1 { 41177 break 41178 } 41179 v_0 := v.Args[0] 41180 if v_0.Op != OpAMD64SETBE { 41181 break 41182 } 41183 x := v_0.Args[0] 41184 v.reset(OpAMD64SETA) 41185 v.AddArg(x) 41186 return true 41187 } 41188 // match: (XORLconst [1] (SETA x)) 41189 // cond: 41190 // result: (SETBE x) 41191 for { 41192 if v.AuxInt != 1 { 41193 break 41194 } 41195 v_0 := v.Args[0] 41196 if v_0.Op != OpAMD64SETA { 41197 break 41198 } 41199 x := v_0.Args[0] 41200 v.reset(OpAMD64SETBE) 41201 v.AddArg(x) 41202 return true 41203 } 41204 return false 41205 } 41206 func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { 41207 // match: (XORLconst [c] (XORLconst [d] x)) 41208 // cond: 41209 // result: (XORLconst [c ^ d] x) 41210 for { 41211 c := v.AuxInt 41212 v_0 := v.Args[0] 41213 if v_0.Op != OpAMD64XORLconst { 41214 break 41215 } 41216 d := v_0.AuxInt 41217 x := v_0.Args[0] 41218 v.reset(OpAMD64XORLconst) 41219 v.AuxInt = c ^ d 41220 v.AddArg(x) 41221 return true 41222 } 41223 // match: (XORLconst [c] x) 41224 // cond: int32(c)==0 41225 // result: x 41226 for { 41227 c := v.AuxInt 41228 x := v.Args[0] 41229 if !(int32(c) == 0) { 41230 break 41231 } 41232 v.reset(OpCopy) 41233 v.Type = x.Type 41234 v.AddArg(x) 41235 return true 41236 } 41237 // match: (XORLconst [c] (MOVLconst [d])) 41238 // cond: 41239 // result: (MOVLconst [c^d]) 41240 for { 41241 c := v.AuxInt 41242 v_0 := v.Args[0] 41243 if v_0.Op != OpAMD64MOVLconst { 41244 break 41245 } 41246 d := v_0.AuxInt 41247 v.reset(OpAMD64MOVLconst) 41248 v.AuxInt = c ^ d 41249 return true 41250 } 41251 return false 41252 } 41253 func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool { 41254 b := v.Block 41255 _ = b 41256 typ := &b.Func.Config.Types 41257 _ = typ 41258 // match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 41259 // cond: 41260 // result: (XORL x (MOVLf2i y)) 41261 for { 41262 off := v.AuxInt 41263 sym := v.Aux 41264 _ = v.Args[2] 41265 x := v.Args[0] 41266 ptr := v.Args[1] 41267 v_2 := v.Args[2] 41268 if v_2.Op != OpAMD64MOVSSstore { 41269 break 41270 } 41271 if v_2.AuxInt != off { 41272 break 41273 } 41274 if v_2.Aux != sym { 41275 break 41276 } 41277 _ = v_2.Args[2] 41278 if ptr != v_2.Args[0] { 41279 break 41280 } 41281 y := v_2.Args[1] 41282 v.reset(OpAMD64XORL) 41283 v.AddArg(x) 41284 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 41285 v0.AddArg(y) 41286 v.AddArg(v0) 41287 return true 41288 } 41289 return false 41290 } 41291 func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { 41292 // match: (XORQ x (MOVQconst [c])) 41293 // cond: is32Bit(c) 41294 // result: (XORQconst [c] x) 41295 for { 41296 _ = v.Args[1] 41297 x := v.Args[0] 41298 v_1 := v.Args[1] 41299 if v_1.Op != OpAMD64MOVQconst { 41300 break 41301 } 41302 c := v_1.AuxInt 41303 if !(is32Bit(c)) { 41304 break 41305 } 41306 v.reset(OpAMD64XORQconst) 41307 v.AuxInt = c 41308 v.AddArg(x) 41309 return true 41310 } 41311 // match: (XORQ (MOVQconst [c]) x) 41312 // cond: is32Bit(c) 41313 // result: (XORQconst [c] x) 41314 for { 41315 _ = v.Args[1] 41316 v_0 := v.Args[0] 41317 if v_0.Op != OpAMD64MOVQconst { 41318 break 41319 } 41320 c := v_0.AuxInt 41321 x := v.Args[1] 41322 if !(is32Bit(c)) { 41323 break 41324 } 41325 v.reset(OpAMD64XORQconst) 41326 v.AuxInt = c 41327 v.AddArg(x) 41328 return true 41329 } 41330 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) 41331 // cond: d==64-c 41332 // result: (ROLQconst x [c]) 
41333 for { 41334 _ = v.Args[1] 41335 v_0 := v.Args[0] 41336 if v_0.Op != OpAMD64SHLQconst { 41337 break 41338 } 41339 c := v_0.AuxInt 41340 x := v_0.Args[0] 41341 v_1 := v.Args[1] 41342 if v_1.Op != OpAMD64SHRQconst { 41343 break 41344 } 41345 d := v_1.AuxInt 41346 if x != v_1.Args[0] { 41347 break 41348 } 41349 if !(d == 64-c) { 41350 break 41351 } 41352 v.reset(OpAMD64ROLQconst) 41353 v.AuxInt = c 41354 v.AddArg(x) 41355 return true 41356 } 41357 // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) 41358 // cond: d==64-c 41359 // result: (ROLQconst x [c]) 41360 for { 41361 _ = v.Args[1] 41362 v_0 := v.Args[0] 41363 if v_0.Op != OpAMD64SHRQconst { 41364 break 41365 } 41366 d := v_0.AuxInt 41367 x := v_0.Args[0] 41368 v_1 := v.Args[1] 41369 if v_1.Op != OpAMD64SHLQconst { 41370 break 41371 } 41372 c := v_1.AuxInt 41373 if x != v_1.Args[0] { 41374 break 41375 } 41376 if !(d == 64-c) { 41377 break 41378 } 41379 v.reset(OpAMD64ROLQconst) 41380 v.AuxInt = c 41381 v.AddArg(x) 41382 return true 41383 } 41384 // match: (XORQ x x) 41385 // cond: 41386 // result: (MOVQconst [0]) 41387 for { 41388 _ = v.Args[1] 41389 x := v.Args[0] 41390 if x != v.Args[1] { 41391 break 41392 } 41393 v.reset(OpAMD64MOVQconst) 41394 v.AuxInt = 0 41395 return true 41396 } 41397 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) 41398 // cond: canMergeLoad(v, l, x) && clobber(l) 41399 // result: (XORQmem x [off] {sym} ptr mem) 41400 for { 41401 _ = v.Args[1] 41402 x := v.Args[0] 41403 l := v.Args[1] 41404 if l.Op != OpAMD64MOVQload { 41405 break 41406 } 41407 off := l.AuxInt 41408 sym := l.Aux 41409 _ = l.Args[1] 41410 ptr := l.Args[0] 41411 mem := l.Args[1] 41412 if !(canMergeLoad(v, l, x) && clobber(l)) { 41413 break 41414 } 41415 v.reset(OpAMD64XORQmem) 41416 v.AuxInt = off 41417 v.Aux = sym 41418 v.AddArg(x) 41419 v.AddArg(ptr) 41420 v.AddArg(mem) 41421 return true 41422 } 41423 // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) 41424 // cond: canMergeLoad(v, l, x) && clobber(l) 41425 // result: (XORQmem x [off] {sym} ptr mem) 41426 for { 41427 _ = v.Args[1] 41428 l := v.Args[0] 41429 if l.Op != OpAMD64MOVQload { 41430 break 41431 } 41432 off := l.AuxInt 41433 sym := l.Aux 41434 _ = l.Args[1] 41435 ptr := l.Args[0] 41436 mem := l.Args[1] 41437 x := v.Args[1] 41438 if !(canMergeLoad(v, l, x) && clobber(l)) { 41439 break 41440 } 41441 v.reset(OpAMD64XORQmem) 41442 v.AuxInt = off 41443 v.Aux = sym 41444 v.AddArg(x) 41445 v.AddArg(ptr) 41446 v.AddArg(mem) 41447 return true 41448 } 41449 return false 41450 } 41451 func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { 41452 // match: (XORQconst [c] (XORQconst [d] x)) 41453 // cond: 41454 // result: (XORQconst [c ^ d] x) 41455 for { 41456 c := v.AuxInt 41457 v_0 := v.Args[0] 41458 if v_0.Op != OpAMD64XORQconst { 41459 break 41460 } 41461 d := v_0.AuxInt 41462 x := v_0.Args[0] 41463 v.reset(OpAMD64XORQconst) 41464 v.AuxInt = c ^ d 41465 v.AddArg(x) 41466 return true 41467 } 41468 // match: (XORQconst [0] x) 41469 // cond: 41470 // result: x 41471 for { 41472 if v.AuxInt != 0 { 41473 break 41474 } 41475 x := v.Args[0] 41476 v.reset(OpCopy) 41477 v.Type = x.Type 41478 v.AddArg(x) 41479 return true 41480 } 41481 // match: (XORQconst [c] (MOVQconst [d])) 41482 // cond: 41483 // result: (MOVQconst [c^d]) 41484 for { 41485 c := v.AuxInt 41486 v_0 := v.Args[0] 41487 if v_0.Op != OpAMD64MOVQconst { 41488 break 41489 } 41490 d := v_0.AuxInt 41491 v.reset(OpAMD64MOVQconst) 41492 v.AuxInt = c ^ d 41493 return true 41494 } 41495 return false 41496 } 41497 func 
rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool { 41498 b := v.Block 41499 _ = b 41500 typ := &b.Func.Config.Types 41501 _ = typ 41502 // match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 41503 // cond: 41504 // result: (XORQ x (MOVQf2i y)) 41505 for { 41506 off := v.AuxInt 41507 sym := v.Aux 41508 _ = v.Args[2] 41509 x := v.Args[0] 41510 ptr := v.Args[1] 41511 v_2 := v.Args[2] 41512 if v_2.Op != OpAMD64MOVSDstore { 41513 break 41514 } 41515 if v_2.AuxInt != off { 41516 break 41517 } 41518 if v_2.Aux != sym { 41519 break 41520 } 41521 _ = v_2.Args[2] 41522 if ptr != v_2.Args[0] { 41523 break 41524 } 41525 y := v_2.Args[1] 41526 v.reset(OpAMD64XORQ) 41527 v.AddArg(x) 41528 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 41529 v0.AddArg(y) 41530 v.AddArg(v0) 41531 return true 41532 } 41533 return false 41534 } 41535 func rewriteValueAMD64_OpAdd16_0(v *Value) bool { 41536 // match: (Add16 x y) 41537 // cond: 41538 // result: (ADDL x y) 41539 for { 41540 _ = v.Args[1] 41541 x := v.Args[0] 41542 y := v.Args[1] 41543 v.reset(OpAMD64ADDL) 41544 v.AddArg(x) 41545 v.AddArg(y) 41546 return true 41547 } 41548 } 41549 func rewriteValueAMD64_OpAdd32_0(v *Value) bool { 41550 // match: (Add32 x y) 41551 // cond: 41552 // result: (ADDL x y) 41553 for { 41554 _ = v.Args[1] 41555 x := v.Args[0] 41556 y := v.Args[1] 41557 v.reset(OpAMD64ADDL) 41558 v.AddArg(x) 41559 v.AddArg(y) 41560 return true 41561 } 41562 } 41563 func rewriteValueAMD64_OpAdd32F_0(v *Value) bool { 41564 // match: (Add32F x y) 41565 // cond: 41566 // result: (ADDSS x y) 41567 for { 41568 _ = v.Args[1] 41569 x := v.Args[0] 41570 y := v.Args[1] 41571 v.reset(OpAMD64ADDSS) 41572 v.AddArg(x) 41573 v.AddArg(y) 41574 return true 41575 } 41576 } 41577 func rewriteValueAMD64_OpAdd64_0(v *Value) bool { 41578 // match: (Add64 x y) 41579 // cond: 41580 // result: (ADDQ x y) 41581 for { 41582 _ = v.Args[1] 41583 x := v.Args[0] 41584 y := v.Args[1] 41585 v.reset(OpAMD64ADDQ) 41586 v.AddArg(x) 41587 v.AddArg(y) 41588 return true 41589 } 41590 } 41591 func rewriteValueAMD64_OpAdd64F_0(v *Value) bool { 41592 // match: (Add64F x y) 41593 // cond: 41594 // result: (ADDSD x y) 41595 for { 41596 _ = v.Args[1] 41597 x := v.Args[0] 41598 y := v.Args[1] 41599 v.reset(OpAMD64ADDSD) 41600 v.AddArg(x) 41601 v.AddArg(y) 41602 return true 41603 } 41604 } 41605 func rewriteValueAMD64_OpAdd8_0(v *Value) bool { 41606 // match: (Add8 x y) 41607 // cond: 41608 // result: (ADDL x y) 41609 for { 41610 _ = v.Args[1] 41611 x := v.Args[0] 41612 y := v.Args[1] 41613 v.reset(OpAMD64ADDL) 41614 v.AddArg(x) 41615 v.AddArg(y) 41616 return true 41617 } 41618 } 41619 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool { 41620 b := v.Block 41621 _ = b 41622 config := b.Func.Config 41623 _ = config 41624 // match: (AddPtr x y) 41625 // cond: config.PtrSize == 8 41626 // result: (ADDQ x y) 41627 for { 41628 _ = v.Args[1] 41629 x := v.Args[0] 41630 y := v.Args[1] 41631 if !(config.PtrSize == 8) { 41632 break 41633 } 41634 v.reset(OpAMD64ADDQ) 41635 v.AddArg(x) 41636 v.AddArg(y) 41637 return true 41638 } 41639 // match: (AddPtr x y) 41640 // cond: config.PtrSize == 4 41641 // result: (ADDL x y) 41642 for { 41643 _ = v.Args[1] 41644 x := v.Args[0] 41645 y := v.Args[1] 41646 if !(config.PtrSize == 4) { 41647 break 41648 } 41649 v.reset(OpAMD64ADDL) 41650 v.AddArg(x) 41651 v.AddArg(y) 41652 return true 41653 } 41654 return false 41655 } 41656 func rewriteValueAMD64_OpAddr_0(v *Value) bool { 41657 b := v.Block 41658 _ = b 41659 config := b.Func.Config 41660 _ = config 
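// Addr lowers according to the target pointer size: ordinary amd64
// (8-byte pointers) uses LEAQ, while 4-byte-pointer targets use LEAL.
// In gen/AMD64.rules form, the pair below reads roughly:
//
//	(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
//	(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)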
41661 // match: (Addr {sym} base) 41662 // cond: config.PtrSize == 8 41663 // result: (LEAQ {sym} base) 41664 for { 41665 sym := v.Aux 41666 base := v.Args[0] 41667 if !(config.PtrSize == 8) { 41668 break 41669 } 41670 v.reset(OpAMD64LEAQ) 41671 v.Aux = sym 41672 v.AddArg(base) 41673 return true 41674 } 41675 // match: (Addr {sym} base) 41676 // cond: config.PtrSize == 4 41677 // result: (LEAL {sym} base) 41678 for { 41679 sym := v.Aux 41680 base := v.Args[0] 41681 if !(config.PtrSize == 4) { 41682 break 41683 } 41684 v.reset(OpAMD64LEAL) 41685 v.Aux = sym 41686 v.AddArg(base) 41687 return true 41688 } 41689 return false 41690 } 41691 func rewriteValueAMD64_OpAnd16_0(v *Value) bool { 41692 // match: (And16 x y) 41693 // cond: 41694 // result: (ANDL x y) 41695 for { 41696 _ = v.Args[1] 41697 x := v.Args[0] 41698 y := v.Args[1] 41699 v.reset(OpAMD64ANDL) 41700 v.AddArg(x) 41701 v.AddArg(y) 41702 return true 41703 } 41704 } 41705 func rewriteValueAMD64_OpAnd32_0(v *Value) bool { 41706 // match: (And32 x y) 41707 // cond: 41708 // result: (ANDL x y) 41709 for { 41710 _ = v.Args[1] 41711 x := v.Args[0] 41712 y := v.Args[1] 41713 v.reset(OpAMD64ANDL) 41714 v.AddArg(x) 41715 v.AddArg(y) 41716 return true 41717 } 41718 } 41719 func rewriteValueAMD64_OpAnd64_0(v *Value) bool { 41720 // match: (And64 x y) 41721 // cond: 41722 // result: (ANDQ x y) 41723 for { 41724 _ = v.Args[1] 41725 x := v.Args[0] 41726 y := v.Args[1] 41727 v.reset(OpAMD64ANDQ) 41728 v.AddArg(x) 41729 v.AddArg(y) 41730 return true 41731 } 41732 } 41733 func rewriteValueAMD64_OpAnd8_0(v *Value) bool { 41734 // match: (And8 x y) 41735 // cond: 41736 // result: (ANDL x y) 41737 for { 41738 _ = v.Args[1] 41739 x := v.Args[0] 41740 y := v.Args[1] 41741 v.reset(OpAMD64ANDL) 41742 v.AddArg(x) 41743 v.AddArg(y) 41744 return true 41745 } 41746 } 41747 func rewriteValueAMD64_OpAndB_0(v *Value) bool { 41748 // match: (AndB x y) 41749 // cond: 41750 // result: (ANDL x y) 41751 for { 41752 _ = v.Args[1] 41753 x := v.Args[0] 41754 y := v.Args[1] 41755 v.reset(OpAMD64ANDL) 41756 v.AddArg(x) 41757 v.AddArg(y) 41758 return true 41759 } 41760 } 41761 func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool { 41762 b := v.Block 41763 _ = b 41764 typ := &b.Func.Config.Types 41765 _ = typ 41766 // match: (AtomicAdd32 ptr val mem) 41767 // cond: 41768 // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) 41769 for { 41770 _ = v.Args[2] 41771 ptr := v.Args[0] 41772 val := v.Args[1] 41773 mem := v.Args[2] 41774 v.reset(OpAMD64AddTupleFirst32) 41775 v.AddArg(val) 41776 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) 41777 v0.AddArg(val) 41778 v0.AddArg(ptr) 41779 v0.AddArg(mem) 41780 v.AddArg(v0) 41781 return true 41782 } 41783 } 41784 func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool { 41785 b := v.Block 41786 _ = b 41787 typ := &b.Func.Config.Types 41788 _ = typ 41789 // match: (AtomicAdd64 ptr val mem) 41790 // cond: 41791 // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) 41792 for { 41793 _ = v.Args[2] 41794 ptr := v.Args[0] 41795 val := v.Args[1] 41796 mem := v.Args[2] 41797 v.reset(OpAMD64AddTupleFirst64) 41798 v.AddArg(val) 41799 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) 41800 v0.AddArg(val) 41801 v0.AddArg(ptr) 41802 v0.AddArg(mem) 41803 v.AddArg(v0) 41804 return true 41805 } 41806 } 41807 func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool { 41808 // match: (AtomicAnd8 ptr val mem) 41809 // cond: 41810 // result: (ANDBlock ptr val mem) 41811 for { 41812 
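// This rule matches unconditionally: AtomicAnd8 lowers one-to-one to
// ANDBlock, which assembles to a LOCK-prefixed byte AND.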
_ = v.Args[2] 41813 ptr := v.Args[0] 41814 val := v.Args[1] 41815 mem := v.Args[2] 41816 v.reset(OpAMD64ANDBlock) 41817 v.AddArg(ptr) 41818 v.AddArg(val) 41819 v.AddArg(mem) 41820 return true 41821 } 41822 } 41823 func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool { 41824 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 41825 // cond: 41826 // result: (CMPXCHGLlock ptr old new_ mem) 41827 for { 41828 _ = v.Args[3] 41829 ptr := v.Args[0] 41830 old := v.Args[1] 41831 new_ := v.Args[2] 41832 mem := v.Args[3] 41833 v.reset(OpAMD64CMPXCHGLlock) 41834 v.AddArg(ptr) 41835 v.AddArg(old) 41836 v.AddArg(new_) 41837 v.AddArg(mem) 41838 return true 41839 } 41840 } 41841 func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool { 41842 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 41843 // cond: 41844 // result: (CMPXCHGQlock ptr old new_ mem) 41845 for { 41846 _ = v.Args[3] 41847 ptr := v.Args[0] 41848 old := v.Args[1] 41849 new_ := v.Args[2] 41850 mem := v.Args[3] 41851 v.reset(OpAMD64CMPXCHGQlock) 41852 v.AddArg(ptr) 41853 v.AddArg(old) 41854 v.AddArg(new_) 41855 v.AddArg(mem) 41856 return true 41857 } 41858 } 41859 func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool { 41860 // match: (AtomicExchange32 ptr val mem) 41861 // cond: 41862 // result: (XCHGL val ptr mem) 41863 for { 41864 _ = v.Args[2] 41865 ptr := v.Args[0] 41866 val := v.Args[1] 41867 mem := v.Args[2] 41868 v.reset(OpAMD64XCHGL) 41869 v.AddArg(val) 41870 v.AddArg(ptr) 41871 v.AddArg(mem) 41872 return true 41873 } 41874 } 41875 func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool { 41876 // match: (AtomicExchange64 ptr val mem) 41877 // cond: 41878 // result: (XCHGQ val ptr mem) 41879 for { 41880 _ = v.Args[2] 41881 ptr := v.Args[0] 41882 val := v.Args[1] 41883 mem := v.Args[2] 41884 v.reset(OpAMD64XCHGQ) 41885 v.AddArg(val) 41886 v.AddArg(ptr) 41887 v.AddArg(mem) 41888 return true 41889 } 41890 } 41891 func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool { 41892 // match: (AtomicLoad32 ptr mem) 41893 // cond: 41894 // result: (MOVLatomicload ptr mem) 41895 for { 41896 _ = v.Args[1] 41897 ptr := v.Args[0] 41898 mem := v.Args[1] 41899 v.reset(OpAMD64MOVLatomicload) 41900 v.AddArg(ptr) 41901 v.AddArg(mem) 41902 return true 41903 } 41904 } 41905 func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool { 41906 // match: (AtomicLoad64 ptr mem) 41907 // cond: 41908 // result: (MOVQatomicload ptr mem) 41909 for { 41910 _ = v.Args[1] 41911 ptr := v.Args[0] 41912 mem := v.Args[1] 41913 v.reset(OpAMD64MOVQatomicload) 41914 v.AddArg(ptr) 41915 v.AddArg(mem) 41916 return true 41917 } 41918 } 41919 func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool { 41920 b := v.Block 41921 _ = b 41922 config := b.Func.Config 41923 _ = config 41924 // match: (AtomicLoadPtr ptr mem) 41925 // cond: config.PtrSize == 8 41926 // result: (MOVQatomicload ptr mem) 41927 for { 41928 _ = v.Args[1] 41929 ptr := v.Args[0] 41930 mem := v.Args[1] 41931 if !(config.PtrSize == 8) { 41932 break 41933 } 41934 v.reset(OpAMD64MOVQatomicload) 41935 v.AddArg(ptr) 41936 v.AddArg(mem) 41937 return true 41938 } 41939 // match: (AtomicLoadPtr ptr mem) 41940 // cond: config.PtrSize == 4 41941 // result: (MOVLatomicload ptr mem) 41942 for { 41943 _ = v.Args[1] 41944 ptr := v.Args[0] 41945 mem := v.Args[1] 41946 if !(config.PtrSize == 4) { 41947 break 41948 } 41949 v.reset(OpAMD64MOVLatomicload) 41950 v.AddArg(ptr) 41951 v.AddArg(mem) 41952 return true 41953 } 41954 return false 41955 } 41956 func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool { 
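// AtomicOr8 mirrors AtomicAnd8 above, lowering one-to-one to ORBlock
// (a LOCK-prefixed byte OR). These byte-wide read-modify-write ops back
// the runtime's internal 8-bit atomics (e.g. runtime/internal/atomic.Or8);
// they are not exposed through sync/atomic.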
41957 // match: (AtomicOr8 ptr val mem) 41958 // cond: 41959 // result: (ORBlock ptr val mem) 41960 for { 41961 _ = v.Args[2] 41962 ptr := v.Args[0] 41963 val := v.Args[1] 41964 mem := v.Args[2] 41965 v.reset(OpAMD64ORBlock) 41966 v.AddArg(ptr) 41967 v.AddArg(val) 41968 v.AddArg(mem) 41969 return true 41970 } 41971 } 41972 func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool { 41973 b := v.Block 41974 _ = b 41975 typ := &b.Func.Config.Types 41976 _ = typ 41977 // match: (AtomicStore32 ptr val mem) 41978 // cond: 41979 // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem)) 41980 for { 41981 _ = v.Args[2] 41982 ptr := v.Args[0] 41983 val := v.Args[1] 41984 mem := v.Args[2] 41985 v.reset(OpSelect1) 41986 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) 41987 v0.AddArg(val) 41988 v0.AddArg(ptr) 41989 v0.AddArg(mem) 41990 v.AddArg(v0) 41991 return true 41992 } 41993 } 41994 func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool { 41995 b := v.Block 41996 _ = b 41997 typ := &b.Func.Config.Types 41998 _ = typ 41999 // match: (AtomicStore64 ptr val mem) 42000 // cond: 42001 // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem)) 42002 for { 42003 _ = v.Args[2] 42004 ptr := v.Args[0] 42005 val := v.Args[1] 42006 mem := v.Args[2] 42007 v.reset(OpSelect1) 42008 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) 42009 v0.AddArg(val) 42010 v0.AddArg(ptr) 42011 v0.AddArg(mem) 42012 v.AddArg(v0) 42013 return true 42014 } 42015 } 42016 func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool { 42017 b := v.Block 42018 _ = b 42019 config := b.Func.Config 42020 _ = config 42021 typ := &b.Func.Config.Types 42022 _ = typ 42023 // match: (AtomicStorePtrNoWB ptr val mem) 42024 // cond: config.PtrSize == 8 42025 // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) 42026 for { 42027 _ = v.Args[2] 42028 ptr := v.Args[0] 42029 val := v.Args[1] 42030 mem := v.Args[2] 42031 if !(config.PtrSize == 8) { 42032 break 42033 } 42034 v.reset(OpSelect1) 42035 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) 42036 v0.AddArg(val) 42037 v0.AddArg(ptr) 42038 v0.AddArg(mem) 42039 v.AddArg(v0) 42040 return true 42041 } 42042 // match: (AtomicStorePtrNoWB ptr val mem) 42043 // cond: config.PtrSize == 4 42044 // result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) 42045 for { 42046 _ = v.Args[2] 42047 ptr := v.Args[0] 42048 val := v.Args[1] 42049 mem := v.Args[2] 42050 if !(config.PtrSize == 4) { 42051 break 42052 } 42053 v.reset(OpSelect1) 42054 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem)) 42055 v0.AddArg(val) 42056 v0.AddArg(ptr) 42057 v0.AddArg(mem) 42058 v.AddArg(v0) 42059 return true 42060 } 42061 return false 42062 } 42063 func rewriteValueAMD64_OpAvg64u_0(v *Value) bool { 42064 // match: (Avg64u x y) 42065 // cond: 42066 // result: (AVGQU x y) 42067 for { 42068 _ = v.Args[1] 42069 x := v.Args[0] 42070 y := v.Args[1] 42071 v.reset(OpAMD64AVGQU) 42072 v.AddArg(x) 42073 v.AddArg(y) 42074 return true 42075 } 42076 } 42077 func rewriteValueAMD64_OpBitLen32_0(v *Value) bool { 42078 b := v.Block 42079 _ = b 42080 typ := &b.Func.Config.Types 42081 _ = typ 42082 // match: (BitLen32 x) 42083 // cond: 42084 // result: (BitLen64 (MOVLQZX <typ.UInt64> x)) 42085 for { 42086 x := v.Args[0] 42087 v.reset(OpBitLen64) 42088 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) 42089 
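// Zero-extending x to 64 bits lets BitLen32 reuse the BitLen64 lowering
// that follows; bits 32-63 of the extended value are zero, so the bit
// length is unchanged.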
v0.AddArg(x) 42090 v.AddArg(v0) 42091 return true 42092 } 42093 } 42094 func rewriteValueAMD64_OpBitLen64_0(v *Value) bool { 42095 b := v.Block 42096 _ = b 42097 typ := &b.Func.Config.Types 42098 _ = typ 42099 // match: (BitLen64 <t> x) 42100 // cond: 42101 // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x)))) 42102 for { 42103 t := v.Type 42104 x := v.Args[0] 42105 v.reset(OpAMD64ADDQconst) 42106 v.AuxInt = 1 42107 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) 42108 v1 := b.NewValue0(v.Pos, OpSelect0, t) 42109 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42110 v2.AddArg(x) 42111 v1.AddArg(v2) 42112 v0.AddArg(v1) 42113 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 42114 v3.AuxInt = -1 42115 v0.AddArg(v3) 42116 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 42117 v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42118 v5.AddArg(x) 42119 v4.AddArg(v5) 42120 v0.AddArg(v4) 42121 v.AddArg(v0) 42122 return true 42123 } 42124 } 42125 func rewriteValueAMD64_OpBswap32_0(v *Value) bool { 42126 // match: (Bswap32 x) 42127 // cond: 42128 // result: (BSWAPL x) 42129 for { 42130 x := v.Args[0] 42131 v.reset(OpAMD64BSWAPL) 42132 v.AddArg(x) 42133 return true 42134 } 42135 } 42136 func rewriteValueAMD64_OpBswap64_0(v *Value) bool { 42137 // match: (Bswap64 x) 42138 // cond: 42139 // result: (BSWAPQ x) 42140 for { 42141 x := v.Args[0] 42142 v.reset(OpAMD64BSWAPQ) 42143 v.AddArg(x) 42144 return true 42145 } 42146 } 42147 func rewriteValueAMD64_OpCeil_0(v *Value) bool { 42148 // match: (Ceil x) 42149 // cond: 42150 // result: (ROUNDSD [2] x) 42151 for { 42152 x := v.Args[0] 42153 v.reset(OpAMD64ROUNDSD) 42154 v.AuxInt = 2 42155 v.AddArg(x) 42156 return true 42157 } 42158 } 42159 func rewriteValueAMD64_OpClosureCall_0(v *Value) bool { 42160 // match: (ClosureCall [argwid] entry closure mem) 42161 // cond: 42162 // result: (CALLclosure [argwid] entry closure mem) 42163 for { 42164 argwid := v.AuxInt 42165 _ = v.Args[2] 42166 entry := v.Args[0] 42167 closure := v.Args[1] 42168 mem := v.Args[2] 42169 v.reset(OpAMD64CALLclosure) 42170 v.AuxInt = argwid 42171 v.AddArg(entry) 42172 v.AddArg(closure) 42173 v.AddArg(mem) 42174 return true 42175 } 42176 } 42177 func rewriteValueAMD64_OpCom16_0(v *Value) bool { 42178 // match: (Com16 x) 42179 // cond: 42180 // result: (NOTL x) 42181 for { 42182 x := v.Args[0] 42183 v.reset(OpAMD64NOTL) 42184 v.AddArg(x) 42185 return true 42186 } 42187 } 42188 func rewriteValueAMD64_OpCom32_0(v *Value) bool { 42189 // match: (Com32 x) 42190 // cond: 42191 // result: (NOTL x) 42192 for { 42193 x := v.Args[0] 42194 v.reset(OpAMD64NOTL) 42195 v.AddArg(x) 42196 return true 42197 } 42198 } 42199 func rewriteValueAMD64_OpCom64_0(v *Value) bool { 42200 // match: (Com64 x) 42201 // cond: 42202 // result: (NOTQ x) 42203 for { 42204 x := v.Args[0] 42205 v.reset(OpAMD64NOTQ) 42206 v.AddArg(x) 42207 return true 42208 } 42209 } 42210 func rewriteValueAMD64_OpCom8_0(v *Value) bool { 42211 // match: (Com8 x) 42212 // cond: 42213 // result: (NOTL x) 42214 for { 42215 x := v.Args[0] 42216 v.reset(OpAMD64NOTL) 42217 v.AddArg(x) 42218 return true 42219 } 42220 } 42221 func rewriteValueAMD64_OpConst16_0(v *Value) bool { 42222 // match: (Const16 [val]) 42223 // cond: 42224 // result: (MOVLconst [val]) 42225 for { 42226 val := v.AuxInt 42227 v.reset(OpAMD64MOVLconst) 42228 v.AuxInt = val 42229 return true 42230 } 42231 } 42232 func rewriteValueAMD64_OpConst32_0(v 
*Value) bool { 42233 // match: (Const32 [val]) 42234 // cond: 42235 // result: (MOVLconst [val]) 42236 for { 42237 val := v.AuxInt 42238 v.reset(OpAMD64MOVLconst) 42239 v.AuxInt = val 42240 return true 42241 } 42242 } 42243 func rewriteValueAMD64_OpConst32F_0(v *Value) bool { 42244 // match: (Const32F [val]) 42245 // cond: 42246 // result: (MOVSSconst [val]) 42247 for { 42248 val := v.AuxInt 42249 v.reset(OpAMD64MOVSSconst) 42250 v.AuxInt = val 42251 return true 42252 } 42253 } 42254 func rewriteValueAMD64_OpConst64_0(v *Value) bool { 42255 // match: (Const64 [val]) 42256 // cond: 42257 // result: (MOVQconst [val]) 42258 for { 42259 val := v.AuxInt 42260 v.reset(OpAMD64MOVQconst) 42261 v.AuxInt = val 42262 return true 42263 } 42264 } 42265 func rewriteValueAMD64_OpConst64F_0(v *Value) bool { 42266 // match: (Const64F [val]) 42267 // cond: 42268 // result: (MOVSDconst [val]) 42269 for { 42270 val := v.AuxInt 42271 v.reset(OpAMD64MOVSDconst) 42272 v.AuxInt = val 42273 return true 42274 } 42275 } 42276 func rewriteValueAMD64_OpConst8_0(v *Value) bool { 42277 // match: (Const8 [val]) 42278 // cond: 42279 // result: (MOVLconst [val]) 42280 for { 42281 val := v.AuxInt 42282 v.reset(OpAMD64MOVLconst) 42283 v.AuxInt = val 42284 return true 42285 } 42286 } 42287 func rewriteValueAMD64_OpConstBool_0(v *Value) bool { 42288 // match: (ConstBool [b]) 42289 // cond: 42290 // result: (MOVLconst [b]) 42291 for { 42292 b := v.AuxInt 42293 v.reset(OpAMD64MOVLconst) 42294 v.AuxInt = b 42295 return true 42296 } 42297 } 42298 func rewriteValueAMD64_OpConstNil_0(v *Value) bool { 42299 b := v.Block 42300 _ = b 42301 config := b.Func.Config 42302 _ = config 42303 // match: (ConstNil) 42304 // cond: config.PtrSize == 8 42305 // result: (MOVQconst [0]) 42306 for { 42307 if !(config.PtrSize == 8) { 42308 break 42309 } 42310 v.reset(OpAMD64MOVQconst) 42311 v.AuxInt = 0 42312 return true 42313 } 42314 // match: (ConstNil) 42315 // cond: config.PtrSize == 4 42316 // result: (MOVLconst [0]) 42317 for { 42318 if !(config.PtrSize == 4) { 42319 break 42320 } 42321 v.reset(OpAMD64MOVLconst) 42322 v.AuxInt = 0 42323 return true 42324 } 42325 return false 42326 } 42327 func rewriteValueAMD64_OpConvert_0(v *Value) bool { 42328 b := v.Block 42329 _ = b 42330 config := b.Func.Config 42331 _ = config 42332 // match: (Convert <t> x mem) 42333 // cond: config.PtrSize == 8 42334 // result: (MOVQconvert <t> x mem) 42335 for { 42336 t := v.Type 42337 _ = v.Args[1] 42338 x := v.Args[0] 42339 mem := v.Args[1] 42340 if !(config.PtrSize == 8) { 42341 break 42342 } 42343 v.reset(OpAMD64MOVQconvert) 42344 v.Type = t 42345 v.AddArg(x) 42346 v.AddArg(mem) 42347 return true 42348 } 42349 // match: (Convert <t> x mem) 42350 // cond: config.PtrSize == 4 42351 // result: (MOVLconvert <t> x mem) 42352 for { 42353 t := v.Type 42354 _ = v.Args[1] 42355 x := v.Args[0] 42356 mem := v.Args[1] 42357 if !(config.PtrSize == 4) { 42358 break 42359 } 42360 v.reset(OpAMD64MOVLconvert) 42361 v.Type = t 42362 v.AddArg(x) 42363 v.AddArg(mem) 42364 return true 42365 } 42366 return false 42367 } 42368 func rewriteValueAMD64_OpCtz32_0(v *Value) bool { 42369 b := v.Block 42370 _ = b 42371 typ := &b.Func.Config.Types 42372 _ = typ 42373 // match: (Ctz32 x) 42374 // cond: 42375 // result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x))) 42376 for { 42377 x := v.Args[0] 42378 v.reset(OpSelect0) 42379 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42380 v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64) 42381 v2 := 
b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 42382 v2.AuxInt = 1 << 32 42383 v1.AddArg(v2) 42384 v1.AddArg(x) 42385 v0.AddArg(v1) 42386 v.AddArg(v0) 42387 return true 42388 } 42389 } 42390 func rewriteValueAMD64_OpCtz64_0(v *Value) bool { 42391 b := v.Block 42392 _ = b 42393 typ := &b.Func.Config.Types 42394 _ = typ 42395 // match: (Ctz64 <t> x) 42396 // cond: 42397 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x))) 42398 for { 42399 t := v.Type 42400 x := v.Args[0] 42401 v.reset(OpAMD64CMOVQEQ) 42402 v0 := b.NewValue0(v.Pos, OpSelect0, t) 42403 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42404 v1.AddArg(x) 42405 v0.AddArg(v1) 42406 v.AddArg(v0) 42407 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 42408 v2.AuxInt = 64 42409 v.AddArg(v2) 42410 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 42411 v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42412 v4.AddArg(x) 42413 v3.AddArg(v4) 42414 v.AddArg(v3) 42415 return true 42416 } 42417 } 42418 func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool { 42419 // match: (Cvt32Fto32 x) 42420 // cond: 42421 // result: (CVTTSS2SL x) 42422 for { 42423 x := v.Args[0] 42424 v.reset(OpAMD64CVTTSS2SL) 42425 v.AddArg(x) 42426 return true 42427 } 42428 } 42429 func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool { 42430 // match: (Cvt32Fto64 x) 42431 // cond: 42432 // result: (CVTTSS2SQ x) 42433 for { 42434 x := v.Args[0] 42435 v.reset(OpAMD64CVTTSS2SQ) 42436 v.AddArg(x) 42437 return true 42438 } 42439 } 42440 func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool { 42441 // match: (Cvt32Fto64F x) 42442 // cond: 42443 // result: (CVTSS2SD x) 42444 for { 42445 x := v.Args[0] 42446 v.reset(OpAMD64CVTSS2SD) 42447 v.AddArg(x) 42448 return true 42449 } 42450 } 42451 func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool { 42452 // match: (Cvt32to32F x) 42453 // cond: 42454 // result: (CVTSL2SS x) 42455 for { 42456 x := v.Args[0] 42457 v.reset(OpAMD64CVTSL2SS) 42458 v.AddArg(x) 42459 return true 42460 } 42461 } 42462 func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool { 42463 // match: (Cvt32to64F x) 42464 // cond: 42465 // result: (CVTSL2SD x) 42466 for { 42467 x := v.Args[0] 42468 v.reset(OpAMD64CVTSL2SD) 42469 v.AddArg(x) 42470 return true 42471 } 42472 } 42473 func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool { 42474 // match: (Cvt64Fto32 x) 42475 // cond: 42476 // result: (CVTTSD2SL x) 42477 for { 42478 x := v.Args[0] 42479 v.reset(OpAMD64CVTTSD2SL) 42480 v.AddArg(x) 42481 return true 42482 } 42483 } 42484 func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool { 42485 // match: (Cvt64Fto32F x) 42486 // cond: 42487 // result: (CVTSD2SS x) 42488 for { 42489 x := v.Args[0] 42490 v.reset(OpAMD64CVTSD2SS) 42491 v.AddArg(x) 42492 return true 42493 } 42494 } 42495 func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool { 42496 // match: (Cvt64Fto64 x) 42497 // cond: 42498 // result: (CVTTSD2SQ x) 42499 for { 42500 x := v.Args[0] 42501 v.reset(OpAMD64CVTTSD2SQ) 42502 v.AddArg(x) 42503 return true 42504 } 42505 } 42506 func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool { 42507 // match: (Cvt64to32F x) 42508 // cond: 42509 // result: (CVTSQ2SS x) 42510 for { 42511 x := v.Args[0] 42512 v.reset(OpAMD64CVTSQ2SS) 42513 v.AddArg(x) 42514 return true 42515 } 42516 } 42517 func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool { 42518 // match: (Cvt64to64F x) 42519 // cond: 42520 // result: (CVTSQ2SD x) 42521 for { 42522 x := v.Args[0] 42523 
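// CVTSQ2SD is CVTSI2SD with a 64-bit source register: the direct
// int64-to-float64 conversion.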
v.reset(OpAMD64CVTSQ2SD) 42524 v.AddArg(x) 42525 return true 42526 } 42527 } 42528 func rewriteValueAMD64_OpDiv128u_0(v *Value) bool { 42529 // match: (Div128u xhi xlo y) 42530 // cond: 42531 // result: (DIVQU2 xhi xlo y) 42532 for { 42533 _ = v.Args[2] 42534 xhi := v.Args[0] 42535 xlo := v.Args[1] 42536 y := v.Args[2] 42537 v.reset(OpAMD64DIVQU2) 42538 v.AddArg(xhi) 42539 v.AddArg(xlo) 42540 v.AddArg(y) 42541 return true 42542 } 42543 } 42544 func rewriteValueAMD64_OpDiv16_0(v *Value) bool { 42545 b := v.Block 42546 _ = b 42547 typ := &b.Func.Config.Types 42548 _ = typ 42549 // match: (Div16 x y) 42550 // cond: 42551 // result: (Select0 (DIVW x y)) 42552 for { 42553 _ = v.Args[1] 42554 x := v.Args[0] 42555 y := v.Args[1] 42556 v.reset(OpSelect0) 42557 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 42558 v0.AddArg(x) 42559 v0.AddArg(y) 42560 v.AddArg(v0) 42561 return true 42562 } 42563 } 42564 func rewriteValueAMD64_OpDiv16u_0(v *Value) bool { 42565 b := v.Block 42566 _ = b 42567 typ := &b.Func.Config.Types 42568 _ = typ 42569 // match: (Div16u x y) 42570 // cond: 42571 // result: (Select0 (DIVWU x y)) 42572 for { 42573 _ = v.Args[1] 42574 x := v.Args[0] 42575 y := v.Args[1] 42576 v.reset(OpSelect0) 42577 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 42578 v0.AddArg(x) 42579 v0.AddArg(y) 42580 v.AddArg(v0) 42581 return true 42582 } 42583 } 42584 func rewriteValueAMD64_OpDiv32_0(v *Value) bool { 42585 b := v.Block 42586 _ = b 42587 typ := &b.Func.Config.Types 42588 _ = typ 42589 // match: (Div32 x y) 42590 // cond: 42591 // result: (Select0 (DIVL x y)) 42592 for { 42593 _ = v.Args[1] 42594 x := v.Args[0] 42595 y := v.Args[1] 42596 v.reset(OpSelect0) 42597 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 42598 v0.AddArg(x) 42599 v0.AddArg(y) 42600 v.AddArg(v0) 42601 return true 42602 } 42603 } 42604 func rewriteValueAMD64_OpDiv32F_0(v *Value) bool { 42605 // match: (Div32F x y) 42606 // cond: 42607 // result: (DIVSS x y) 42608 for { 42609 _ = v.Args[1] 42610 x := v.Args[0] 42611 y := v.Args[1] 42612 v.reset(OpAMD64DIVSS) 42613 v.AddArg(x) 42614 v.AddArg(y) 42615 return true 42616 } 42617 } 42618 func rewriteValueAMD64_OpDiv32u_0(v *Value) bool { 42619 b := v.Block 42620 _ = b 42621 typ := &b.Func.Config.Types 42622 _ = typ 42623 // match: (Div32u x y) 42624 // cond: 42625 // result: (Select0 (DIVLU x y)) 42626 for { 42627 _ = v.Args[1] 42628 x := v.Args[0] 42629 y := v.Args[1] 42630 v.reset(OpSelect0) 42631 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 42632 v0.AddArg(x) 42633 v0.AddArg(y) 42634 v.AddArg(v0) 42635 return true 42636 } 42637 } 42638 func rewriteValueAMD64_OpDiv64_0(v *Value) bool { 42639 b := v.Block 42640 _ = b 42641 typ := &b.Func.Config.Types 42642 _ = typ 42643 // match: (Div64 x y) 42644 // cond: 42645 // result: (Select0 (DIVQ x y)) 42646 for { 42647 _ = v.Args[1] 42648 x := v.Args[0] 42649 y := v.Args[1] 42650 v.reset(OpSelect0) 42651 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 42652 v0.AddArg(x) 42653 v0.AddArg(y) 42654 v.AddArg(v0) 42655 return true 42656 } 42657 } 42658 func rewriteValueAMD64_OpDiv64F_0(v *Value) bool { 42659 // match: (Div64F x y) 42660 // cond: 42661 // result: (DIVSD x y) 42662 for { 42663 _ = v.Args[1] 42664 x := v.Args[0] 42665 y := v.Args[1] 42666 v.reset(OpAMD64DIVSD) 42667 v.AddArg(x) 42668 v.AddArg(y) 42669 return true 42670 } 42671 } 42672 func rewriteValueAMD64_OpDiv64u_0(v *Value) bool { 
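// Like the DIVW/DIVL ops above, DIVQU yields a (quotient, remainder)
// tuple, mirroring the hardware division that leaves the quotient in
// RAX and the remainder in RDX. The Div* rules keep Select0 (the
// quotient); the Mod* rules later in this file keep Select1 (the
// remainder) of the same instruction.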
42673 b := v.Block 42674 _ = b 42675 typ := &b.Func.Config.Types 42676 _ = typ 42677 // match: (Div64u x y) 42678 // cond: 42679 // result: (Select0 (DIVQU x y)) 42680 for { 42681 _ = v.Args[1] 42682 x := v.Args[0] 42683 y := v.Args[1] 42684 v.reset(OpSelect0) 42685 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 42686 v0.AddArg(x) 42687 v0.AddArg(y) 42688 v.AddArg(v0) 42689 return true 42690 } 42691 } 42692 func rewriteValueAMD64_OpDiv8_0(v *Value) bool { 42693 b := v.Block 42694 _ = b 42695 typ := &b.Func.Config.Types 42696 _ = typ 42697 // match: (Div8 x y) 42698 // cond: 42699 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 42700 for { 42701 _ = v.Args[1] 42702 x := v.Args[0] 42703 y := v.Args[1] 42704 v.reset(OpSelect0) 42705 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 42706 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 42707 v1.AddArg(x) 42708 v0.AddArg(v1) 42709 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 42710 v2.AddArg(y) 42711 v0.AddArg(v2) 42712 v.AddArg(v0) 42713 return true 42714 } 42715 } 42716 func rewriteValueAMD64_OpDiv8u_0(v *Value) bool { 42717 b := v.Block 42718 _ = b 42719 typ := &b.Func.Config.Types 42720 _ = typ 42721 // match: (Div8u x y) 42722 // cond: 42723 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 42724 for { 42725 _ = v.Args[1] 42726 x := v.Args[0] 42727 y := v.Args[1] 42728 v.reset(OpSelect0) 42729 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 42730 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 42731 v1.AddArg(x) 42732 v0.AddArg(v1) 42733 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 42734 v2.AddArg(y) 42735 v0.AddArg(v2) 42736 v.AddArg(v0) 42737 return true 42738 } 42739 } 42740 func rewriteValueAMD64_OpEq16_0(v *Value) bool { 42741 b := v.Block 42742 _ = b 42743 // match: (Eq16 x y) 42744 // cond: 42745 // result: (SETEQ (CMPW x y)) 42746 for { 42747 _ = v.Args[1] 42748 x := v.Args[0] 42749 y := v.Args[1] 42750 v.reset(OpAMD64SETEQ) 42751 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 42752 v0.AddArg(x) 42753 v0.AddArg(y) 42754 v.AddArg(v0) 42755 return true 42756 } 42757 } 42758 func rewriteValueAMD64_OpEq32_0(v *Value) bool { 42759 b := v.Block 42760 _ = b 42761 // match: (Eq32 x y) 42762 // cond: 42763 // result: (SETEQ (CMPL x y)) 42764 for { 42765 _ = v.Args[1] 42766 x := v.Args[0] 42767 y := v.Args[1] 42768 v.reset(OpAMD64SETEQ) 42769 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 42770 v0.AddArg(x) 42771 v0.AddArg(y) 42772 v.AddArg(v0) 42773 return true 42774 } 42775 } 42776 func rewriteValueAMD64_OpEq32F_0(v *Value) bool { 42777 b := v.Block 42778 _ = b 42779 // match: (Eq32F x y) 42780 // cond: 42781 // result: (SETEQF (UCOMISS x y)) 42782 for { 42783 _ = v.Args[1] 42784 x := v.Args[0] 42785 y := v.Args[1] 42786 v.reset(OpAMD64SETEQF) 42787 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 42788 v0.AddArg(x) 42789 v0.AddArg(y) 42790 v.AddArg(v0) 42791 return true 42792 } 42793 } 42794 func rewriteValueAMD64_OpEq64_0(v *Value) bool { 42795 b := v.Block 42796 _ = b 42797 // match: (Eq64 x y) 42798 // cond: 42799 // result: (SETEQ (CMPQ x y)) 42800 for { 42801 _ = v.Args[1] 42802 x := v.Args[0] 42803 y := v.Args[1] 42804 v.reset(OpAMD64SETEQ) 42805 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 42806 v0.AddArg(x) 42807 v0.AddArg(y) 42808 v.AddArg(v0) 42809 return true 42810 } 42811 } 42812 func rewriteValueAMD64_OpEq64F_0(v *Value) bool { 42813 b := v.Block 42814 _ 
= b 42815 // match: (Eq64F x y) 42816 // cond: 42817 // result: (SETEQF (UCOMISD x y)) 42818 for { 42819 _ = v.Args[1] 42820 x := v.Args[0] 42821 y := v.Args[1] 42822 v.reset(OpAMD64SETEQF) 42823 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 42824 v0.AddArg(x) 42825 v0.AddArg(y) 42826 v.AddArg(v0) 42827 return true 42828 } 42829 } 42830 func rewriteValueAMD64_OpEq8_0(v *Value) bool { 42831 b := v.Block 42832 _ = b 42833 // match: (Eq8 x y) 42834 // cond: 42835 // result: (SETEQ (CMPB x y)) 42836 for { 42837 _ = v.Args[1] 42838 x := v.Args[0] 42839 y := v.Args[1] 42840 v.reset(OpAMD64SETEQ) 42841 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 42842 v0.AddArg(x) 42843 v0.AddArg(y) 42844 v.AddArg(v0) 42845 return true 42846 } 42847 } 42848 func rewriteValueAMD64_OpEqB_0(v *Value) bool { 42849 b := v.Block 42850 _ = b 42851 // match: (EqB x y) 42852 // cond: 42853 // result: (SETEQ (CMPB x y)) 42854 for { 42855 _ = v.Args[1] 42856 x := v.Args[0] 42857 y := v.Args[1] 42858 v.reset(OpAMD64SETEQ) 42859 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 42860 v0.AddArg(x) 42861 v0.AddArg(y) 42862 v.AddArg(v0) 42863 return true 42864 } 42865 } 42866 func rewriteValueAMD64_OpEqPtr_0(v *Value) bool { 42867 b := v.Block 42868 _ = b 42869 config := b.Func.Config 42870 _ = config 42871 // match: (EqPtr x y) 42872 // cond: config.PtrSize == 8 42873 // result: (SETEQ (CMPQ x y)) 42874 for { 42875 _ = v.Args[1] 42876 x := v.Args[0] 42877 y := v.Args[1] 42878 if !(config.PtrSize == 8) { 42879 break 42880 } 42881 v.reset(OpAMD64SETEQ) 42882 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 42883 v0.AddArg(x) 42884 v0.AddArg(y) 42885 v.AddArg(v0) 42886 return true 42887 } 42888 // match: (EqPtr x y) 42889 // cond: config.PtrSize == 4 42890 // result: (SETEQ (CMPL x y)) 42891 for { 42892 _ = v.Args[1] 42893 x := v.Args[0] 42894 y := v.Args[1] 42895 if !(config.PtrSize == 4) { 42896 break 42897 } 42898 v.reset(OpAMD64SETEQ) 42899 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 42900 v0.AddArg(x) 42901 v0.AddArg(y) 42902 v.AddArg(v0) 42903 return true 42904 } 42905 return false 42906 } 42907 func rewriteValueAMD64_OpFloor_0(v *Value) bool { 42908 // match: (Floor x) 42909 // cond: 42910 // result: (ROUNDSD [1] x) 42911 for { 42912 x := v.Args[0] 42913 v.reset(OpAMD64ROUNDSD) 42914 v.AuxInt = 1 42915 v.AddArg(x) 42916 return true 42917 } 42918 } 42919 func rewriteValueAMD64_OpGeq16_0(v *Value) bool { 42920 b := v.Block 42921 _ = b 42922 // match: (Geq16 x y) 42923 // cond: 42924 // result: (SETGE (CMPW x y)) 42925 for { 42926 _ = v.Args[1] 42927 x := v.Args[0] 42928 y := v.Args[1] 42929 v.reset(OpAMD64SETGE) 42930 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 42931 v0.AddArg(x) 42932 v0.AddArg(y) 42933 v.AddArg(v0) 42934 return true 42935 } 42936 } 42937 func rewriteValueAMD64_OpGeq16U_0(v *Value) bool { 42938 b := v.Block 42939 _ = b 42940 // match: (Geq16U x y) 42941 // cond: 42942 // result: (SETAE (CMPW x y)) 42943 for { 42944 _ = v.Args[1] 42945 x := v.Args[0] 42946 y := v.Args[1] 42947 v.reset(OpAMD64SETAE) 42948 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 42949 v0.AddArg(x) 42950 v0.AddArg(y) 42951 v.AddArg(v0) 42952 return true 42953 } 42954 } 42955 func rewriteValueAMD64_OpGeq32_0(v *Value) bool { 42956 b := v.Block 42957 _ = b 42958 // match: (Geq32 x y) 42959 // cond: 42960 // result: (SETGE (CMPL x y)) 42961 for { 42962 _ = v.Args[1] 42963 x := v.Args[0] 42964 y := v.Args[1] 42965 v.reset(OpAMD64SETGE) 42966 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, 
types.TypeFlags) 42967 v0.AddArg(x) 42968 v0.AddArg(y) 42969 v.AddArg(v0) 42970 return true 42971 } 42972 } 42973 func rewriteValueAMD64_OpGeq32F_0(v *Value) bool { 42974 b := v.Block 42975 _ = b 42976 // match: (Geq32F x y) 42977 // cond: 42978 // result: (SETGEF (UCOMISS x y)) 42979 for { 42980 _ = v.Args[1] 42981 x := v.Args[0] 42982 y := v.Args[1] 42983 v.reset(OpAMD64SETGEF) 42984 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 42985 v0.AddArg(x) 42986 v0.AddArg(y) 42987 v.AddArg(v0) 42988 return true 42989 } 42990 } 42991 func rewriteValueAMD64_OpGeq32U_0(v *Value) bool { 42992 b := v.Block 42993 _ = b 42994 // match: (Geq32U x y) 42995 // cond: 42996 // result: (SETAE (CMPL x y)) 42997 for { 42998 _ = v.Args[1] 42999 x := v.Args[0] 43000 y := v.Args[1] 43001 v.reset(OpAMD64SETAE) 43002 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43003 v0.AddArg(x) 43004 v0.AddArg(y) 43005 v.AddArg(v0) 43006 return true 43007 } 43008 } 43009 func rewriteValueAMD64_OpGeq64_0(v *Value) bool { 43010 b := v.Block 43011 _ = b 43012 // match: (Geq64 x y) 43013 // cond: 43014 // result: (SETGE (CMPQ x y)) 43015 for { 43016 _ = v.Args[1] 43017 x := v.Args[0] 43018 y := v.Args[1] 43019 v.reset(OpAMD64SETGE) 43020 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43021 v0.AddArg(x) 43022 v0.AddArg(y) 43023 v.AddArg(v0) 43024 return true 43025 } 43026 } 43027 func rewriteValueAMD64_OpGeq64F_0(v *Value) bool { 43028 b := v.Block 43029 _ = b 43030 // match: (Geq64F x y) 43031 // cond: 43032 // result: (SETGEF (UCOMISD x y)) 43033 for { 43034 _ = v.Args[1] 43035 x := v.Args[0] 43036 y := v.Args[1] 43037 v.reset(OpAMD64SETGEF) 43038 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 43039 v0.AddArg(x) 43040 v0.AddArg(y) 43041 v.AddArg(v0) 43042 return true 43043 } 43044 } 43045 func rewriteValueAMD64_OpGeq64U_0(v *Value) bool { 43046 b := v.Block 43047 _ = b 43048 // match: (Geq64U x y) 43049 // cond: 43050 // result: (SETAE (CMPQ x y)) 43051 for { 43052 _ = v.Args[1] 43053 x := v.Args[0] 43054 y := v.Args[1] 43055 v.reset(OpAMD64SETAE) 43056 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43057 v0.AddArg(x) 43058 v0.AddArg(y) 43059 v.AddArg(v0) 43060 return true 43061 } 43062 } 43063 func rewriteValueAMD64_OpGeq8_0(v *Value) bool { 43064 b := v.Block 43065 _ = b 43066 // match: (Geq8 x y) 43067 // cond: 43068 // result: (SETGE (CMPB x y)) 43069 for { 43070 _ = v.Args[1] 43071 x := v.Args[0] 43072 y := v.Args[1] 43073 v.reset(OpAMD64SETGE) 43074 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43075 v0.AddArg(x) 43076 v0.AddArg(y) 43077 v.AddArg(v0) 43078 return true 43079 } 43080 } 43081 func rewriteValueAMD64_OpGeq8U_0(v *Value) bool { 43082 b := v.Block 43083 _ = b 43084 // match: (Geq8U x y) 43085 // cond: 43086 // result: (SETAE (CMPB x y)) 43087 for { 43088 _ = v.Args[1] 43089 x := v.Args[0] 43090 y := v.Args[1] 43091 v.reset(OpAMD64SETAE) 43092 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43093 v0.AddArg(x) 43094 v0.AddArg(y) 43095 v.AddArg(v0) 43096 return true 43097 } 43098 } 43099 func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool { 43100 // match: (GetCallerPC) 43101 // cond: 43102 // result: (LoweredGetCallerPC) 43103 for { 43104 v.reset(OpAMD64LoweredGetCallerPC) 43105 return true 43106 } 43107 } 43108 func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool { 43109 // match: (GetCallerSP) 43110 // cond: 43111 // result: (LoweredGetCallerSP) 43112 for { 43113 v.reset(OpAMD64LoweredGetCallerSP) 43114 return true 43115 } 43116 } 43117 func 
rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool { 43118 // match: (GetClosurePtr) 43119 // cond: 43120 // result: (LoweredGetClosurePtr) 43121 for { 43122 v.reset(OpAMD64LoweredGetClosurePtr) 43123 return true 43124 } 43125 } 43126 func rewriteValueAMD64_OpGetG_0(v *Value) bool { 43127 // match: (GetG mem) 43128 // cond: 43129 // result: (LoweredGetG mem) 43130 for { 43131 mem := v.Args[0] 43132 v.reset(OpAMD64LoweredGetG) 43133 v.AddArg(mem) 43134 return true 43135 } 43136 } 43137 func rewriteValueAMD64_OpGreater16_0(v *Value) bool { 43138 b := v.Block 43139 _ = b 43140 // match: (Greater16 x y) 43141 // cond: 43142 // result: (SETG (CMPW x y)) 43143 for { 43144 _ = v.Args[1] 43145 x := v.Args[0] 43146 y := v.Args[1] 43147 v.reset(OpAMD64SETG) 43148 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43149 v0.AddArg(x) 43150 v0.AddArg(y) 43151 v.AddArg(v0) 43152 return true 43153 } 43154 } 43155 func rewriteValueAMD64_OpGreater16U_0(v *Value) bool { 43156 b := v.Block 43157 _ = b 43158 // match: (Greater16U x y) 43159 // cond: 43160 // result: (SETA (CMPW x y)) 43161 for { 43162 _ = v.Args[1] 43163 x := v.Args[0] 43164 y := v.Args[1] 43165 v.reset(OpAMD64SETA) 43166 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43167 v0.AddArg(x) 43168 v0.AddArg(y) 43169 v.AddArg(v0) 43170 return true 43171 } 43172 } 43173 func rewriteValueAMD64_OpGreater32_0(v *Value) bool { 43174 b := v.Block 43175 _ = b 43176 // match: (Greater32 x y) 43177 // cond: 43178 // result: (SETG (CMPL x y)) 43179 for { 43180 _ = v.Args[1] 43181 x := v.Args[0] 43182 y := v.Args[1] 43183 v.reset(OpAMD64SETG) 43184 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43185 v0.AddArg(x) 43186 v0.AddArg(y) 43187 v.AddArg(v0) 43188 return true 43189 } 43190 } 43191 func rewriteValueAMD64_OpGreater32F_0(v *Value) bool { 43192 b := v.Block 43193 _ = b 43194 // match: (Greater32F x y) 43195 // cond: 43196 // result: (SETGF (UCOMISS x y)) 43197 for { 43198 _ = v.Args[1] 43199 x := v.Args[0] 43200 y := v.Args[1] 43201 v.reset(OpAMD64SETGF) 43202 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 43203 v0.AddArg(x) 43204 v0.AddArg(y) 43205 v.AddArg(v0) 43206 return true 43207 } 43208 } 43209 func rewriteValueAMD64_OpGreater32U_0(v *Value) bool { 43210 b := v.Block 43211 _ = b 43212 // match: (Greater32U x y) 43213 // cond: 43214 // result: (SETA (CMPL x y)) 43215 for { 43216 _ = v.Args[1] 43217 x := v.Args[0] 43218 y := v.Args[1] 43219 v.reset(OpAMD64SETA) 43220 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43221 v0.AddArg(x) 43222 v0.AddArg(y) 43223 v.AddArg(v0) 43224 return true 43225 } 43226 } 43227 func rewriteValueAMD64_OpGreater64_0(v *Value) bool { 43228 b := v.Block 43229 _ = b 43230 // match: (Greater64 x y) 43231 // cond: 43232 // result: (SETG (CMPQ x y)) 43233 for { 43234 _ = v.Args[1] 43235 x := v.Args[0] 43236 y := v.Args[1] 43237 v.reset(OpAMD64SETG) 43238 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43239 v0.AddArg(x) 43240 v0.AddArg(y) 43241 v.AddArg(v0) 43242 return true 43243 } 43244 } 43245 func rewriteValueAMD64_OpGreater64F_0(v *Value) bool { 43246 b := v.Block 43247 _ = b 43248 // match: (Greater64F x y) 43249 // cond: 43250 // result: (SETGF (UCOMISD x y)) 43251 for { 43252 _ = v.Args[1] 43253 x := v.Args[0] 43254 y := v.Args[1] 43255 v.reset(OpAMD64SETGF) 43256 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 43257 v0.AddArg(x) 43258 v0.AddArg(y) 43259 v.AddArg(v0) 43260 return true 43261 } 43262 } 43263 func rewriteValueAMD64_OpGreater64U_0(v *Value) bool { 
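// The unsigned comparisons map onto the unsigned condition codes:
// SETA ("above", CF == 0 && ZF == 0) and SETAE ("above or equal",
// CF == 0) read the carry flag left by CMP, while the signed
// Greater*/Geq* rules use SETG/SETGE, which combine the sign,
// overflow, and zero flags instead.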
43264 b := v.Block 43265 _ = b 43266 // match: (Greater64U x y) 43267 // cond: 43268 // result: (SETA (CMPQ x y)) 43269 for { 43270 _ = v.Args[1] 43271 x := v.Args[0] 43272 y := v.Args[1] 43273 v.reset(OpAMD64SETA) 43274 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43275 v0.AddArg(x) 43276 v0.AddArg(y) 43277 v.AddArg(v0) 43278 return true 43279 } 43280 } 43281 func rewriteValueAMD64_OpGreater8_0(v *Value) bool { 43282 b := v.Block 43283 _ = b 43284 // match: (Greater8 x y) 43285 // cond: 43286 // result: (SETG (CMPB x y)) 43287 for { 43288 _ = v.Args[1] 43289 x := v.Args[0] 43290 y := v.Args[1] 43291 v.reset(OpAMD64SETG) 43292 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43293 v0.AddArg(x) 43294 v0.AddArg(y) 43295 v.AddArg(v0) 43296 return true 43297 } 43298 } 43299 func rewriteValueAMD64_OpGreater8U_0(v *Value) bool { 43300 b := v.Block 43301 _ = b 43302 // match: (Greater8U x y) 43303 // cond: 43304 // result: (SETA (CMPB x y)) 43305 for { 43306 _ = v.Args[1] 43307 x := v.Args[0] 43308 y := v.Args[1] 43309 v.reset(OpAMD64SETA) 43310 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43311 v0.AddArg(x) 43312 v0.AddArg(y) 43313 v.AddArg(v0) 43314 return true 43315 } 43316 } 43317 func rewriteValueAMD64_OpHmul32_0(v *Value) bool { 43318 // match: (Hmul32 x y) 43319 // cond: 43320 // result: (HMULL x y) 43321 for { 43322 _ = v.Args[1] 43323 x := v.Args[0] 43324 y := v.Args[1] 43325 v.reset(OpAMD64HMULL) 43326 v.AddArg(x) 43327 v.AddArg(y) 43328 return true 43329 } 43330 } 43331 func rewriteValueAMD64_OpHmul32u_0(v *Value) bool { 43332 // match: (Hmul32u x y) 43333 // cond: 43334 // result: (HMULLU x y) 43335 for { 43336 _ = v.Args[1] 43337 x := v.Args[0] 43338 y := v.Args[1] 43339 v.reset(OpAMD64HMULLU) 43340 v.AddArg(x) 43341 v.AddArg(y) 43342 return true 43343 } 43344 } 43345 func rewriteValueAMD64_OpHmul64_0(v *Value) bool { 43346 // match: (Hmul64 x y) 43347 // cond: 43348 // result: (HMULQ x y) 43349 for { 43350 _ = v.Args[1] 43351 x := v.Args[0] 43352 y := v.Args[1] 43353 v.reset(OpAMD64HMULQ) 43354 v.AddArg(x) 43355 v.AddArg(y) 43356 return true 43357 } 43358 } 43359 func rewriteValueAMD64_OpHmul64u_0(v *Value) bool { 43360 // match: (Hmul64u x y) 43361 // cond: 43362 // result: (HMULQU x y) 43363 for { 43364 _ = v.Args[1] 43365 x := v.Args[0] 43366 y := v.Args[1] 43367 v.reset(OpAMD64HMULQU) 43368 v.AddArg(x) 43369 v.AddArg(y) 43370 return true 43371 } 43372 } 43373 func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool { 43374 // match: (Int64Hi x) 43375 // cond: 43376 // result: (SHRQconst [32] x) 43377 for { 43378 x := v.Args[0] 43379 v.reset(OpAMD64SHRQconst) 43380 v.AuxInt = 32 43381 v.AddArg(x) 43382 return true 43383 } 43384 } 43385 func rewriteValueAMD64_OpInterCall_0(v *Value) bool { 43386 // match: (InterCall [argwid] entry mem) 43387 // cond: 43388 // result: (CALLinter [argwid] entry mem) 43389 for { 43390 argwid := v.AuxInt 43391 _ = v.Args[1] 43392 entry := v.Args[0] 43393 mem := v.Args[1] 43394 v.reset(OpAMD64CALLinter) 43395 v.AuxInt = argwid 43396 v.AddArg(entry) 43397 v.AddArg(mem) 43398 return true 43399 } 43400 } 43401 func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool { 43402 b := v.Block 43403 _ = b 43404 config := b.Func.Config 43405 _ = config 43406 // match: (IsInBounds idx len) 43407 // cond: config.PtrSize == 8 43408 // result: (SETB (CMPQ idx len)) 43409 for { 43410 _ = v.Args[1] 43411 idx := v.Args[0] 43412 len := v.Args[1] 43413 if !(config.PtrSize == 8) { 43414 break 43415 } 43416 v.reset(OpAMD64SETB) 43417 v0 := b.NewValue0(v.Pos, 
OpAMD64CMPQ, types.TypeFlags) 43418 v0.AddArg(idx) 43419 v0.AddArg(len) 43420 v.AddArg(v0) 43421 return true 43422 } 43423 // match: (IsInBounds idx len) 43424 // cond: config.PtrSize == 4 43425 // result: (SETB (CMPL idx len)) 43426 for { 43427 _ = v.Args[1] 43428 idx := v.Args[0] 43429 len := v.Args[1] 43430 if !(config.PtrSize == 4) { 43431 break 43432 } 43433 v.reset(OpAMD64SETB) 43434 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43435 v0.AddArg(idx) 43436 v0.AddArg(len) 43437 v.AddArg(v0) 43438 return true 43439 } 43440 return false 43441 } 43442 func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool { 43443 b := v.Block 43444 _ = b 43445 config := b.Func.Config 43446 _ = config 43447 // match: (IsNonNil p) 43448 // cond: config.PtrSize == 8 43449 // result: (SETNE (TESTQ p p)) 43450 for { 43451 p := v.Args[0] 43452 if !(config.PtrSize == 8) { 43453 break 43454 } 43455 v.reset(OpAMD64SETNE) 43456 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) 43457 v0.AddArg(p) 43458 v0.AddArg(p) 43459 v.AddArg(v0) 43460 return true 43461 } 43462 // match: (IsNonNil p) 43463 // cond: config.PtrSize == 4 43464 // result: (SETNE (TESTL p p)) 43465 for { 43466 p := v.Args[0] 43467 if !(config.PtrSize == 4) { 43468 break 43469 } 43470 v.reset(OpAMD64SETNE) 43471 v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags) 43472 v0.AddArg(p) 43473 v0.AddArg(p) 43474 v.AddArg(v0) 43475 return true 43476 } 43477 return false 43478 } 43479 func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool { 43480 b := v.Block 43481 _ = b 43482 config := b.Func.Config 43483 _ = config 43484 // match: (IsSliceInBounds idx len) 43485 // cond: config.PtrSize == 8 43486 // result: (SETBE (CMPQ idx len)) 43487 for { 43488 _ = v.Args[1] 43489 idx := v.Args[0] 43490 len := v.Args[1] 43491 if !(config.PtrSize == 8) { 43492 break 43493 } 43494 v.reset(OpAMD64SETBE) 43495 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43496 v0.AddArg(idx) 43497 v0.AddArg(len) 43498 v.AddArg(v0) 43499 return true 43500 } 43501 // match: (IsSliceInBounds idx len) 43502 // cond: config.PtrSize == 4 43503 // result: (SETBE (CMPL idx len)) 43504 for { 43505 _ = v.Args[1] 43506 idx := v.Args[0] 43507 len := v.Args[1] 43508 if !(config.PtrSize == 4) { 43509 break 43510 } 43511 v.reset(OpAMD64SETBE) 43512 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43513 v0.AddArg(idx) 43514 v0.AddArg(len) 43515 v.AddArg(v0) 43516 return true 43517 } 43518 return false 43519 } 43520 func rewriteValueAMD64_OpLeq16_0(v *Value) bool { 43521 b := v.Block 43522 _ = b 43523 // match: (Leq16 x y) 43524 // cond: 43525 // result: (SETLE (CMPW x y)) 43526 for { 43527 _ = v.Args[1] 43528 x := v.Args[0] 43529 y := v.Args[1] 43530 v.reset(OpAMD64SETLE) 43531 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43532 v0.AddArg(x) 43533 v0.AddArg(y) 43534 v.AddArg(v0) 43535 return true 43536 } 43537 } 43538 func rewriteValueAMD64_OpLeq16U_0(v *Value) bool { 43539 b := v.Block 43540 _ = b 43541 // match: (Leq16U x y) 43542 // cond: 43543 // result: (SETBE (CMPW x y)) 43544 for { 43545 _ = v.Args[1] 43546 x := v.Args[0] 43547 y := v.Args[1] 43548 v.reset(OpAMD64SETBE) 43549 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43550 v0.AddArg(x) 43551 v0.AddArg(y) 43552 v.AddArg(v0) 43553 return true 43554 } 43555 } 43556 func rewriteValueAMD64_OpLeq32_0(v *Value) bool { 43557 b := v.Block 43558 _ = b 43559 // match: (Leq32 x y) 43560 // cond: 43561 // result: (SETLE (CMPL x y)) 43562 for { 43563 _ = v.Args[1] 43564 x := v.Args[0] 43565 y := v.Args[1] 
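// Signed integer comparisons lower directly to SETLE/SETL of a CMP.
// The floating-point variants (Leq32F/Leq64F below, Less32F/Less64F
// further down) instead test the reversed comparison y >= x, as in
// (SETGEF (UCOMISS y x)). After UCOMISS/UCOMISD an unordered (NaN)
// operand sets CF, so the above-or-equal style condition comes out
// false for NaN, giving Go's required "comparisons involving NaN are
// false" without a separate parity-flag check; a direct
// less-or-equal test (CF or ZF set) would wrongly be true for NaN.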
43566 v.reset(OpAMD64SETLE) 43567 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43568 v0.AddArg(x) 43569 v0.AddArg(y) 43570 v.AddArg(v0) 43571 return true 43572 } 43573 } 43574 func rewriteValueAMD64_OpLeq32F_0(v *Value) bool { 43575 b := v.Block 43576 _ = b 43577 // match: (Leq32F x y) 43578 // cond: 43579 // result: (SETGEF (UCOMISS y x)) 43580 for { 43581 _ = v.Args[1] 43582 x := v.Args[0] 43583 y := v.Args[1] 43584 v.reset(OpAMD64SETGEF) 43585 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 43586 v0.AddArg(y) 43587 v0.AddArg(x) 43588 v.AddArg(v0) 43589 return true 43590 } 43591 } 43592 func rewriteValueAMD64_OpLeq32U_0(v *Value) bool { 43593 b := v.Block 43594 _ = b 43595 // match: (Leq32U x y) 43596 // cond: 43597 // result: (SETBE (CMPL x y)) 43598 for { 43599 _ = v.Args[1] 43600 x := v.Args[0] 43601 y := v.Args[1] 43602 v.reset(OpAMD64SETBE) 43603 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43604 v0.AddArg(x) 43605 v0.AddArg(y) 43606 v.AddArg(v0) 43607 return true 43608 } 43609 } 43610 func rewriteValueAMD64_OpLeq64_0(v *Value) bool { 43611 b := v.Block 43612 _ = b 43613 // match: (Leq64 x y) 43614 // cond: 43615 // result: (SETLE (CMPQ x y)) 43616 for { 43617 _ = v.Args[1] 43618 x := v.Args[0] 43619 y := v.Args[1] 43620 v.reset(OpAMD64SETLE) 43621 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43622 v0.AddArg(x) 43623 v0.AddArg(y) 43624 v.AddArg(v0) 43625 return true 43626 } 43627 } 43628 func rewriteValueAMD64_OpLeq64F_0(v *Value) bool { 43629 b := v.Block 43630 _ = b 43631 // match: (Leq64F x y) 43632 // cond: 43633 // result: (SETGEF (UCOMISD y x)) 43634 for { 43635 _ = v.Args[1] 43636 x := v.Args[0] 43637 y := v.Args[1] 43638 v.reset(OpAMD64SETGEF) 43639 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 43640 v0.AddArg(y) 43641 v0.AddArg(x) 43642 v.AddArg(v0) 43643 return true 43644 } 43645 } 43646 func rewriteValueAMD64_OpLeq64U_0(v *Value) bool { 43647 b := v.Block 43648 _ = b 43649 // match: (Leq64U x y) 43650 // cond: 43651 // result: (SETBE (CMPQ x y)) 43652 for { 43653 _ = v.Args[1] 43654 x := v.Args[0] 43655 y := v.Args[1] 43656 v.reset(OpAMD64SETBE) 43657 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43658 v0.AddArg(x) 43659 v0.AddArg(y) 43660 v.AddArg(v0) 43661 return true 43662 } 43663 } 43664 func rewriteValueAMD64_OpLeq8_0(v *Value) bool { 43665 b := v.Block 43666 _ = b 43667 // match: (Leq8 x y) 43668 // cond: 43669 // result: (SETLE (CMPB x y)) 43670 for { 43671 _ = v.Args[1] 43672 x := v.Args[0] 43673 y := v.Args[1] 43674 v.reset(OpAMD64SETLE) 43675 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43676 v0.AddArg(x) 43677 v0.AddArg(y) 43678 v.AddArg(v0) 43679 return true 43680 } 43681 } 43682 func rewriteValueAMD64_OpLeq8U_0(v *Value) bool { 43683 b := v.Block 43684 _ = b 43685 // match: (Leq8U x y) 43686 // cond: 43687 // result: (SETBE (CMPB x y)) 43688 for { 43689 _ = v.Args[1] 43690 x := v.Args[0] 43691 y := v.Args[1] 43692 v.reset(OpAMD64SETBE) 43693 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43694 v0.AddArg(x) 43695 v0.AddArg(y) 43696 v.AddArg(v0) 43697 return true 43698 } 43699 } 43700 func rewriteValueAMD64_OpLess16_0(v *Value) bool { 43701 b := v.Block 43702 _ = b 43703 // match: (Less16 x y) 43704 // cond: 43705 // result: (SETL (CMPW x y)) 43706 for { 43707 _ = v.Args[1] 43708 x := v.Args[0] 43709 y := v.Args[1] 43710 v.reset(OpAMD64SETL) 43711 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43712 v0.AddArg(x) 43713 v0.AddArg(y) 43714 v.AddArg(v0) 43715 return true 43716 } 
43717 } 43718 func rewriteValueAMD64_OpLess16U_0(v *Value) bool { 43719 b := v.Block 43720 _ = b 43721 // match: (Less16U x y) 43722 // cond: 43723 // result: (SETB (CMPW x y)) 43724 for { 43725 _ = v.Args[1] 43726 x := v.Args[0] 43727 y := v.Args[1] 43728 v.reset(OpAMD64SETB) 43729 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43730 v0.AddArg(x) 43731 v0.AddArg(y) 43732 v.AddArg(v0) 43733 return true 43734 } 43735 } 43736 func rewriteValueAMD64_OpLess32_0(v *Value) bool { 43737 b := v.Block 43738 _ = b 43739 // match: (Less32 x y) 43740 // cond: 43741 // result: (SETL (CMPL x y)) 43742 for { 43743 _ = v.Args[1] 43744 x := v.Args[0] 43745 y := v.Args[1] 43746 v.reset(OpAMD64SETL) 43747 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43748 v0.AddArg(x) 43749 v0.AddArg(y) 43750 v.AddArg(v0) 43751 return true 43752 } 43753 } 43754 func rewriteValueAMD64_OpLess32F_0(v *Value) bool { 43755 b := v.Block 43756 _ = b 43757 // match: (Less32F x y) 43758 // cond: 43759 // result: (SETGF (UCOMISS y x)) 43760 for { 43761 _ = v.Args[1] 43762 x := v.Args[0] 43763 y := v.Args[1] 43764 v.reset(OpAMD64SETGF) 43765 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 43766 v0.AddArg(y) 43767 v0.AddArg(x) 43768 v.AddArg(v0) 43769 return true 43770 } 43771 } 43772 func rewriteValueAMD64_OpLess32U_0(v *Value) bool { 43773 b := v.Block 43774 _ = b 43775 // match: (Less32U x y) 43776 // cond: 43777 // result: (SETB (CMPL x y)) 43778 for { 43779 _ = v.Args[1] 43780 x := v.Args[0] 43781 y := v.Args[1] 43782 v.reset(OpAMD64SETB) 43783 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43784 v0.AddArg(x) 43785 v0.AddArg(y) 43786 v.AddArg(v0) 43787 return true 43788 } 43789 } 43790 func rewriteValueAMD64_OpLess64_0(v *Value) bool { 43791 b := v.Block 43792 _ = b 43793 // match: (Less64 x y) 43794 // cond: 43795 // result: (SETL (CMPQ x y)) 43796 for { 43797 _ = v.Args[1] 43798 x := v.Args[0] 43799 y := v.Args[1] 43800 v.reset(OpAMD64SETL) 43801 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43802 v0.AddArg(x) 43803 v0.AddArg(y) 43804 v.AddArg(v0) 43805 return true 43806 } 43807 } 43808 func rewriteValueAMD64_OpLess64F_0(v *Value) bool { 43809 b := v.Block 43810 _ = b 43811 // match: (Less64F x y) 43812 // cond: 43813 // result: (SETGF (UCOMISD y x)) 43814 for { 43815 _ = v.Args[1] 43816 x := v.Args[0] 43817 y := v.Args[1] 43818 v.reset(OpAMD64SETGF) 43819 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 43820 v0.AddArg(y) 43821 v0.AddArg(x) 43822 v.AddArg(v0) 43823 return true 43824 } 43825 } 43826 func rewriteValueAMD64_OpLess64U_0(v *Value) bool { 43827 b := v.Block 43828 _ = b 43829 // match: (Less64U x y) 43830 // cond: 43831 // result: (SETB (CMPQ x y)) 43832 for { 43833 _ = v.Args[1] 43834 x := v.Args[0] 43835 y := v.Args[1] 43836 v.reset(OpAMD64SETB) 43837 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43838 v0.AddArg(x) 43839 v0.AddArg(y) 43840 v.AddArg(v0) 43841 return true 43842 } 43843 } 43844 func rewriteValueAMD64_OpLess8_0(v *Value) bool { 43845 b := v.Block 43846 _ = b 43847 // match: (Less8 x y) 43848 // cond: 43849 // result: (SETL (CMPB x y)) 43850 for { 43851 _ = v.Args[1] 43852 x := v.Args[0] 43853 y := v.Args[1] 43854 v.reset(OpAMD64SETL) 43855 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43856 v0.AddArg(x) 43857 v0.AddArg(y) 43858 v.AddArg(v0) 43859 return true 43860 } 43861 } 43862 func rewriteValueAMD64_OpLess8U_0(v *Value) bool { 43863 b := v.Block 43864 _ = b 43865 // match: (Less8U x y) 43866 // cond: 43867 // result: (SETB (CMPB x 
y)) 43868 for { 43869 _ = v.Args[1] 43870 x := v.Args[0] 43871 y := v.Args[1] 43872 v.reset(OpAMD64SETB) 43873 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43874 v0.AddArg(x) 43875 v0.AddArg(y) 43876 v.AddArg(v0) 43877 return true 43878 } 43879 } 43880 func rewriteValueAMD64_OpLoad_0(v *Value) bool { 43881 b := v.Block 43882 _ = b 43883 config := b.Func.Config 43884 _ = config 43885 // match: (Load <t> ptr mem) 43886 // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) 43887 // result: (MOVQload ptr mem) 43888 for { 43889 t := v.Type 43890 _ = v.Args[1] 43891 ptr := v.Args[0] 43892 mem := v.Args[1] 43893 if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) { 43894 break 43895 } 43896 v.reset(OpAMD64MOVQload) 43897 v.AddArg(ptr) 43898 v.AddArg(mem) 43899 return true 43900 } 43901 // match: (Load <t> ptr mem) 43902 // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) 43903 // result: (MOVLload ptr mem) 43904 for { 43905 t := v.Type 43906 _ = v.Args[1] 43907 ptr := v.Args[0] 43908 mem := v.Args[1] 43909 if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) { 43910 break 43911 } 43912 v.reset(OpAMD64MOVLload) 43913 v.AddArg(ptr) 43914 v.AddArg(mem) 43915 return true 43916 } 43917 // match: (Load <t> ptr mem) 43918 // cond: is16BitInt(t) 43919 // result: (MOVWload ptr mem) 43920 for { 43921 t := v.Type 43922 _ = v.Args[1] 43923 ptr := v.Args[0] 43924 mem := v.Args[1] 43925 if !(is16BitInt(t)) { 43926 break 43927 } 43928 v.reset(OpAMD64MOVWload) 43929 v.AddArg(ptr) 43930 v.AddArg(mem) 43931 return true 43932 } 43933 // match: (Load <t> ptr mem) 43934 // cond: (t.IsBoolean() || is8BitInt(t)) 43935 // result: (MOVBload ptr mem) 43936 for { 43937 t := v.Type 43938 _ = v.Args[1] 43939 ptr := v.Args[0] 43940 mem := v.Args[1] 43941 if !(t.IsBoolean() || is8BitInt(t)) { 43942 break 43943 } 43944 v.reset(OpAMD64MOVBload) 43945 v.AddArg(ptr) 43946 v.AddArg(mem) 43947 return true 43948 } 43949 // match: (Load <t> ptr mem) 43950 // cond: is32BitFloat(t) 43951 // result: (MOVSSload ptr mem) 43952 for { 43953 t := v.Type 43954 _ = v.Args[1] 43955 ptr := v.Args[0] 43956 mem := v.Args[1] 43957 if !(is32BitFloat(t)) { 43958 break 43959 } 43960 v.reset(OpAMD64MOVSSload) 43961 v.AddArg(ptr) 43962 v.AddArg(mem) 43963 return true 43964 } 43965 // match: (Load <t> ptr mem) 43966 // cond: is64BitFloat(t) 43967 // result: (MOVSDload ptr mem) 43968 for { 43969 t := v.Type 43970 _ = v.Args[1] 43971 ptr := v.Args[0] 43972 mem := v.Args[1] 43973 if !(is64BitFloat(t)) { 43974 break 43975 } 43976 v.reset(OpAMD64MOVSDload) 43977 v.AddArg(ptr) 43978 v.AddArg(mem) 43979 return true 43980 } 43981 return false 43982 } 43983 func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool { 43984 b := v.Block 43985 _ = b 43986 // match: (Lsh16x16 <t> x y) 43987 // cond: 43988 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 43989 for { 43990 t := v.Type 43991 _ = v.Args[1] 43992 x := v.Args[0] 43993 y := v.Args[1] 43994 v.reset(OpAMD64ANDL) 43995 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 43996 v0.AddArg(x) 43997 v0.AddArg(y) 43998 v.AddArg(v0) 43999 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44000 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44001 v2.AuxInt = 32 44002 v2.AddArg(y) 44003 v1.AddArg(v2) 44004 v.AddArg(v1) 44005 return true 44006 } 44007 } 44008 func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool { 44009 b := v.Block 44010 _ = b 44011 // match: (Lsh16x32 <t> x y) 44012 // cond: 44013 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 44014 
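// Go requires a shift by a count >= the operand's width to produce 0,
// but SHLL masks its count to 5 bits (SHLQ to 6). These Lsh rules
// patch over the difference: CMP*const compares the count against the
// cutoff, SBBLcarrymask turns the resulting carry into 0 or ^0, and
// the ANDL zeroes oversized shifts. In effect:
//
//	mask := int32(0)
//	if uint32(y) < 32 {
//		mask = -1 // all ones
//	}
//	result := (x << (y & 31)) & mask
//
// The 8- and 16-bit left shifts also use 32 as the cutoff: counts at
// or above the nominal width but below 32 already push every bit out
// of the low 8 or 16 bits that survive truncation, so only counts
// >= 32, where SHLL would wrap around, need the mask. The 64-bit
// rules below use SHLQ/SBBQcarrymask with a cutoff of 64.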
for { 44015 t := v.Type 44016 _ = v.Args[1] 44017 x := v.Args[0] 44018 y := v.Args[1] 44019 v.reset(OpAMD64ANDL) 44020 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44021 v0.AddArg(x) 44022 v0.AddArg(y) 44023 v.AddArg(v0) 44024 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44025 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44026 v2.AuxInt = 32 44027 v2.AddArg(y) 44028 v1.AddArg(v2) 44029 v.AddArg(v1) 44030 return true 44031 } 44032 } 44033 func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool { 44034 b := v.Block 44035 _ = b 44036 // match: (Lsh16x64 <t> x y) 44037 // cond: 44038 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 44039 for { 44040 t := v.Type 44041 _ = v.Args[1] 44042 x := v.Args[0] 44043 y := v.Args[1] 44044 v.reset(OpAMD64ANDL) 44045 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44046 v0.AddArg(x) 44047 v0.AddArg(y) 44048 v.AddArg(v0) 44049 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44050 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44051 v2.AuxInt = 32 44052 v2.AddArg(y) 44053 v1.AddArg(v2) 44054 v.AddArg(v1) 44055 return true 44056 } 44057 } 44058 func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool { 44059 b := v.Block 44060 _ = b 44061 // match: (Lsh16x8 <t> x y) 44062 // cond: 44063 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 44064 for { 44065 t := v.Type 44066 _ = v.Args[1] 44067 x := v.Args[0] 44068 y := v.Args[1] 44069 v.reset(OpAMD64ANDL) 44070 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44071 v0.AddArg(x) 44072 v0.AddArg(y) 44073 v.AddArg(v0) 44074 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44075 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44076 v2.AuxInt = 32 44077 v2.AddArg(y) 44078 v1.AddArg(v2) 44079 v.AddArg(v1) 44080 return true 44081 } 44082 } 44083 func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool { 44084 b := v.Block 44085 _ = b 44086 // match: (Lsh32x16 <t> x y) 44087 // cond: 44088 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 44089 for { 44090 t := v.Type 44091 _ = v.Args[1] 44092 x := v.Args[0] 44093 y := v.Args[1] 44094 v.reset(OpAMD64ANDL) 44095 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44096 v0.AddArg(x) 44097 v0.AddArg(y) 44098 v.AddArg(v0) 44099 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44100 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44101 v2.AuxInt = 32 44102 v2.AddArg(y) 44103 v1.AddArg(v2) 44104 v.AddArg(v1) 44105 return true 44106 } 44107 } 44108 func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool { 44109 b := v.Block 44110 _ = b 44111 // match: (Lsh32x32 <t> x y) 44112 // cond: 44113 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 44114 for { 44115 t := v.Type 44116 _ = v.Args[1] 44117 x := v.Args[0] 44118 y := v.Args[1] 44119 v.reset(OpAMD64ANDL) 44120 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44121 v0.AddArg(x) 44122 v0.AddArg(y) 44123 v.AddArg(v0) 44124 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44125 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44126 v2.AuxInt = 32 44127 v2.AddArg(y) 44128 v1.AddArg(v2) 44129 v.AddArg(v1) 44130 return true 44131 } 44132 } 44133 func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool { 44134 b := v.Block 44135 _ = b 44136 // match: (Lsh32x64 <t> x y) 44137 // cond: 44138 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 44139 for { 44140 t := v.Type 44141 _ = v.Args[1] 44142 x := v.Args[0] 44143 y := v.Args[1] 44144 v.reset(OpAMD64ANDL) 44145 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44146 
v0.AddArg(x) 44147 v0.AddArg(y) 44148 v.AddArg(v0) 44149 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44150 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44151 v2.AuxInt = 32 44152 v2.AddArg(y) 44153 v1.AddArg(v2) 44154 v.AddArg(v1) 44155 return true 44156 } 44157 } 44158 func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool { 44159 b := v.Block 44160 _ = b 44161 // match: (Lsh32x8 <t> x y) 44162 // cond: 44163 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 44164 for { 44165 t := v.Type 44166 _ = v.Args[1] 44167 x := v.Args[0] 44168 y := v.Args[1] 44169 v.reset(OpAMD64ANDL) 44170 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44171 v0.AddArg(x) 44172 v0.AddArg(y) 44173 v.AddArg(v0) 44174 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44175 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44176 v2.AuxInt = 32 44177 v2.AddArg(y) 44178 v1.AddArg(v2) 44179 v.AddArg(v1) 44180 return true 44181 } 44182 } 44183 func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool { 44184 b := v.Block 44185 _ = b 44186 // match: (Lsh64x16 <t> x y) 44187 // cond: 44188 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 44189 for { 44190 t := v.Type 44191 _ = v.Args[1] 44192 x := v.Args[0] 44193 y := v.Args[1] 44194 v.reset(OpAMD64ANDQ) 44195 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 44196 v0.AddArg(x) 44197 v0.AddArg(y) 44198 v.AddArg(v0) 44199 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44200 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44201 v2.AuxInt = 64 44202 v2.AddArg(y) 44203 v1.AddArg(v2) 44204 v.AddArg(v1) 44205 return true 44206 } 44207 } 44208 func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool { 44209 b := v.Block 44210 _ = b 44211 // match: (Lsh64x32 <t> x y) 44212 // cond: 44213 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 44214 for { 44215 t := v.Type 44216 _ = v.Args[1] 44217 x := v.Args[0] 44218 y := v.Args[1] 44219 v.reset(OpAMD64ANDQ) 44220 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 44221 v0.AddArg(x) 44222 v0.AddArg(y) 44223 v.AddArg(v0) 44224 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44225 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44226 v2.AuxInt = 64 44227 v2.AddArg(y) 44228 v1.AddArg(v2) 44229 v.AddArg(v1) 44230 return true 44231 } 44232 } 44233 func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool { 44234 b := v.Block 44235 _ = b 44236 // match: (Lsh64x64 <t> x y) 44237 // cond: 44238 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 44239 for { 44240 t := v.Type 44241 _ = v.Args[1] 44242 x := v.Args[0] 44243 y := v.Args[1] 44244 v.reset(OpAMD64ANDQ) 44245 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 44246 v0.AddArg(x) 44247 v0.AddArg(y) 44248 v.AddArg(v0) 44249 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44250 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44251 v2.AuxInt = 64 44252 v2.AddArg(y) 44253 v1.AddArg(v2) 44254 v.AddArg(v1) 44255 return true 44256 } 44257 } 44258 func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool { 44259 b := v.Block 44260 _ = b 44261 // match: (Lsh64x8 <t> x y) 44262 // cond: 44263 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 44264 for { 44265 t := v.Type 44266 _ = v.Args[1] 44267 x := v.Args[0] 44268 y := v.Args[1] 44269 v.reset(OpAMD64ANDQ) 44270 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 44271 v0.AddArg(x) 44272 v0.AddArg(y) 44273 v.AddArg(v0) 44274 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44275 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44276 
v2.AuxInt = 64 44277 v2.AddArg(y) 44278 v1.AddArg(v2) 44279 v.AddArg(v1) 44280 return true 44281 } 44282 } 44283 func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool { 44284 b := v.Block 44285 _ = b 44286 // match: (Lsh8x16 <t> x y) 44287 // cond: 44288 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 44289 for { 44290 t := v.Type 44291 _ = v.Args[1] 44292 x := v.Args[0] 44293 y := v.Args[1] 44294 v.reset(OpAMD64ANDL) 44295 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44296 v0.AddArg(x) 44297 v0.AddArg(y) 44298 v.AddArg(v0) 44299 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44300 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44301 v2.AuxInt = 32 44302 v2.AddArg(y) 44303 v1.AddArg(v2) 44304 v.AddArg(v1) 44305 return true 44306 } 44307 } 44308 func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool { 44309 b := v.Block 44310 _ = b 44311 // match: (Lsh8x32 <t> x y) 44312 // cond: 44313 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 44314 for { 44315 t := v.Type 44316 _ = v.Args[1] 44317 x := v.Args[0] 44318 y := v.Args[1] 44319 v.reset(OpAMD64ANDL) 44320 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44321 v0.AddArg(x) 44322 v0.AddArg(y) 44323 v.AddArg(v0) 44324 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44325 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44326 v2.AuxInt = 32 44327 v2.AddArg(y) 44328 v1.AddArg(v2) 44329 v.AddArg(v1) 44330 return true 44331 } 44332 } 44333 func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool { 44334 b := v.Block 44335 _ = b 44336 // match: (Lsh8x64 <t> x y) 44337 // cond: 44338 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 44339 for { 44340 t := v.Type 44341 _ = v.Args[1] 44342 x := v.Args[0] 44343 y := v.Args[1] 44344 v.reset(OpAMD64ANDL) 44345 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44346 v0.AddArg(x) 44347 v0.AddArg(y) 44348 v.AddArg(v0) 44349 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44350 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44351 v2.AuxInt = 32 44352 v2.AddArg(y) 44353 v1.AddArg(v2) 44354 v.AddArg(v1) 44355 return true 44356 } 44357 } 44358 func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool { 44359 b := v.Block 44360 _ = b 44361 // match: (Lsh8x8 <t> x y) 44362 // cond: 44363 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 44364 for { 44365 t := v.Type 44366 _ = v.Args[1] 44367 x := v.Args[0] 44368 y := v.Args[1] 44369 v.reset(OpAMD64ANDL) 44370 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44371 v0.AddArg(x) 44372 v0.AddArg(y) 44373 v.AddArg(v0) 44374 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44375 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44376 v2.AuxInt = 32 44377 v2.AddArg(y) 44378 v1.AddArg(v2) 44379 v.AddArg(v1) 44380 return true 44381 } 44382 } 44383 func rewriteValueAMD64_OpMod16_0(v *Value) bool { 44384 b := v.Block 44385 _ = b 44386 typ := &b.Func.Config.Types 44387 _ = typ 44388 // match: (Mod16 x y) 44389 // cond: 44390 // result: (Select1 (DIVW x y)) 44391 for { 44392 _ = v.Args[1] 44393 x := v.Args[0] 44394 y := v.Args[1] 44395 v.reset(OpSelect1) 44396 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 44397 v0.AddArg(x) 44398 v0.AddArg(y) 44399 v.AddArg(v0) 44400 return true 44401 } 44402 } 44403 func rewriteValueAMD64_OpMod16u_0(v *Value) bool { 44404 b := v.Block 44405 _ = b 44406 typ := &b.Func.Config.Types 44407 _ = typ 44408 // match: (Mod16u x y) 44409 // cond: 44410 // result: (Select1 (DIVWU x y)) 44411 for { 44412 _ = v.Args[1] 44413 x := 
v.Args[0] 44414 y := v.Args[1] 44415 v.reset(OpSelect1) 44416 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 44417 v0.AddArg(x) 44418 v0.AddArg(y) 44419 v.AddArg(v0) 44420 return true 44421 } 44422 } 44423 func rewriteValueAMD64_OpMod32_0(v *Value) bool { 44424 b := v.Block 44425 _ = b 44426 typ := &b.Func.Config.Types 44427 _ = typ 44428 // match: (Mod32 x y) 44429 // cond: 44430 // result: (Select1 (DIVL x y)) 44431 for { 44432 _ = v.Args[1] 44433 x := v.Args[0] 44434 y := v.Args[1] 44435 v.reset(OpSelect1) 44436 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 44437 v0.AddArg(x) 44438 v0.AddArg(y) 44439 v.AddArg(v0) 44440 return true 44441 } 44442 } 44443 func rewriteValueAMD64_OpMod32u_0(v *Value) bool { 44444 b := v.Block 44445 _ = b 44446 typ := &b.Func.Config.Types 44447 _ = typ 44448 // match: (Mod32u x y) 44449 // cond: 44450 // result: (Select1 (DIVLU x y)) 44451 for { 44452 _ = v.Args[1] 44453 x := v.Args[0] 44454 y := v.Args[1] 44455 v.reset(OpSelect1) 44456 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 44457 v0.AddArg(x) 44458 v0.AddArg(y) 44459 v.AddArg(v0) 44460 return true 44461 } 44462 } 44463 func rewriteValueAMD64_OpMod64_0(v *Value) bool { 44464 b := v.Block 44465 _ = b 44466 typ := &b.Func.Config.Types 44467 _ = typ 44468 // match: (Mod64 x y) 44469 // cond: 44470 // result: (Select1 (DIVQ x y)) 44471 for { 44472 _ = v.Args[1] 44473 x := v.Args[0] 44474 y := v.Args[1] 44475 v.reset(OpSelect1) 44476 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 44477 v0.AddArg(x) 44478 v0.AddArg(y) 44479 v.AddArg(v0) 44480 return true 44481 } 44482 } 44483 func rewriteValueAMD64_OpMod64u_0(v *Value) bool { 44484 b := v.Block 44485 _ = b 44486 typ := &b.Func.Config.Types 44487 _ = typ 44488 // match: (Mod64u x y) 44489 // cond: 44490 // result: (Select1 (DIVQU x y)) 44491 for { 44492 _ = v.Args[1] 44493 x := v.Args[0] 44494 y := v.Args[1] 44495 v.reset(OpSelect1) 44496 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 44497 v0.AddArg(x) 44498 v0.AddArg(y) 44499 v.AddArg(v0) 44500 return true 44501 } 44502 } 44503 func rewriteValueAMD64_OpMod8_0(v *Value) bool { 44504 b := v.Block 44505 _ = b 44506 typ := &b.Func.Config.Types 44507 _ = typ 44508 // match: (Mod8 x y) 44509 // cond: 44510 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 44511 for { 44512 _ = v.Args[1] 44513 x := v.Args[0] 44514 y := v.Args[1] 44515 v.reset(OpSelect1) 44516 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 44517 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 44518 v1.AddArg(x) 44519 v0.AddArg(v1) 44520 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 44521 v2.AddArg(y) 44522 v0.AddArg(v2) 44523 v.AddArg(v0) 44524 return true 44525 } 44526 } 44527 func rewriteValueAMD64_OpMod8u_0(v *Value) bool { 44528 b := v.Block 44529 _ = b 44530 typ := &b.Func.Config.Types 44531 _ = typ 44532 // match: (Mod8u x y) 44533 // cond: 44534 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 44535 for { 44536 _ = v.Args[1] 44537 x := v.Args[0] 44538 y := v.Args[1] 44539 v.reset(OpSelect1) 44540 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 44541 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 44542 v1.AddArg(x) 44543 v0.AddArg(v1) 44544 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 44545 v2.AddArg(y) 44546 v0.AddArg(v2) 44547 v.AddArg(v0) 44548 return true 44549 } 44550 } 44551 
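// The Move rules below lower memory copies by size class. Exact sizes
// 1, 2, 4, 8, and 16 become a single load/store pair (16 uses the SSE
// MOVO pair, or two MOVQ pairs when useSSE is off); sizes 3, 5, 6,
// and 7 use two adjacent pairs; sizes 9-15 use two 8-byte pairs that
// overlap in the middle, e.g.
//
//	Move [12] dst src mem
//	=> (MOVQstore [4] dst (MOVQload [4] src mem)
//	     (MOVQstore dst (MOVQload src mem) mem))
//
// Larger sizes that are not multiples of 16 copy the first s%16 bytes
// eagerly, then advance both pointers with OffPtr and recurse as
// (Move [s-s%16] ...). Multiples of 16 from 32 up to 16*64 bytes jump
// into Duff's device: the DUFFCOPY AuxInt 14*(64-s/16) is the byte
// offset of the entry point in a 64-step unrolled copy whose steps
// each occupy 14 bytes of code. Anything bigger, or any target with
// noDuffDevice set, falls back to REPMOVSQ moving s/8 quadwords.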
func rewriteValueAMD64_OpMove_0(v *Value) bool { 44552 b := v.Block 44553 _ = b 44554 config := b.Func.Config 44555 _ = config 44556 typ := &b.Func.Config.Types 44557 _ = typ 44558 // match: (Move [0] _ _ mem) 44559 // cond: 44560 // result: mem 44561 for { 44562 if v.AuxInt != 0 { 44563 break 44564 } 44565 _ = v.Args[2] 44566 mem := v.Args[2] 44567 v.reset(OpCopy) 44568 v.Type = mem.Type 44569 v.AddArg(mem) 44570 return true 44571 } 44572 // match: (Move [1] dst src mem) 44573 // cond: 44574 // result: (MOVBstore dst (MOVBload src mem) mem) 44575 for { 44576 if v.AuxInt != 1 { 44577 break 44578 } 44579 _ = v.Args[2] 44580 dst := v.Args[0] 44581 src := v.Args[1] 44582 mem := v.Args[2] 44583 v.reset(OpAMD64MOVBstore) 44584 v.AddArg(dst) 44585 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 44586 v0.AddArg(src) 44587 v0.AddArg(mem) 44588 v.AddArg(v0) 44589 v.AddArg(mem) 44590 return true 44591 } 44592 // match: (Move [2] dst src mem) 44593 // cond: 44594 // result: (MOVWstore dst (MOVWload src mem) mem) 44595 for { 44596 if v.AuxInt != 2 { 44597 break 44598 } 44599 _ = v.Args[2] 44600 dst := v.Args[0] 44601 src := v.Args[1] 44602 mem := v.Args[2] 44603 v.reset(OpAMD64MOVWstore) 44604 v.AddArg(dst) 44605 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 44606 v0.AddArg(src) 44607 v0.AddArg(mem) 44608 v.AddArg(v0) 44609 v.AddArg(mem) 44610 return true 44611 } 44612 // match: (Move [4] dst src mem) 44613 // cond: 44614 // result: (MOVLstore dst (MOVLload src mem) mem) 44615 for { 44616 if v.AuxInt != 4 { 44617 break 44618 } 44619 _ = v.Args[2] 44620 dst := v.Args[0] 44621 src := v.Args[1] 44622 mem := v.Args[2] 44623 v.reset(OpAMD64MOVLstore) 44624 v.AddArg(dst) 44625 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44626 v0.AddArg(src) 44627 v0.AddArg(mem) 44628 v.AddArg(v0) 44629 v.AddArg(mem) 44630 return true 44631 } 44632 // match: (Move [8] dst src mem) 44633 // cond: 44634 // result: (MOVQstore dst (MOVQload src mem) mem) 44635 for { 44636 if v.AuxInt != 8 { 44637 break 44638 } 44639 _ = v.Args[2] 44640 dst := v.Args[0] 44641 src := v.Args[1] 44642 mem := v.Args[2] 44643 v.reset(OpAMD64MOVQstore) 44644 v.AddArg(dst) 44645 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44646 v0.AddArg(src) 44647 v0.AddArg(mem) 44648 v.AddArg(v0) 44649 v.AddArg(mem) 44650 return true 44651 } 44652 // match: (Move [16] dst src mem) 44653 // cond: config.useSSE 44654 // result: (MOVOstore dst (MOVOload src mem) mem) 44655 for { 44656 if v.AuxInt != 16 { 44657 break 44658 } 44659 _ = v.Args[2] 44660 dst := v.Args[0] 44661 src := v.Args[1] 44662 mem := v.Args[2] 44663 if !(config.useSSE) { 44664 break 44665 } 44666 v.reset(OpAMD64MOVOstore) 44667 v.AddArg(dst) 44668 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 44669 v0.AddArg(src) 44670 v0.AddArg(mem) 44671 v.AddArg(v0) 44672 v.AddArg(mem) 44673 return true 44674 } 44675 // match: (Move [16] dst src mem) 44676 // cond: !config.useSSE 44677 // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 44678 for { 44679 if v.AuxInt != 16 { 44680 break 44681 } 44682 _ = v.Args[2] 44683 dst := v.Args[0] 44684 src := v.Args[1] 44685 mem := v.Args[2] 44686 if !(!config.useSSE) { 44687 break 44688 } 44689 v.reset(OpAMD64MOVQstore) 44690 v.AuxInt = 8 44691 v.AddArg(dst) 44692 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44693 v0.AuxInt = 8 44694 v0.AddArg(src) 44695 v0.AddArg(mem) 44696 v.AddArg(v0) 44697 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 44698 v1.AddArg(dst) 44699 v2 
:= b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44700 v2.AddArg(src) 44701 v2.AddArg(mem) 44702 v1.AddArg(v2) 44703 v1.AddArg(mem) 44704 v.AddArg(v1) 44705 return true 44706 } 44707 // match: (Move [3] dst src mem) 44708 // cond: 44709 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 44710 for { 44711 if v.AuxInt != 3 { 44712 break 44713 } 44714 _ = v.Args[2] 44715 dst := v.Args[0] 44716 src := v.Args[1] 44717 mem := v.Args[2] 44718 v.reset(OpAMD64MOVBstore) 44719 v.AuxInt = 2 44720 v.AddArg(dst) 44721 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 44722 v0.AuxInt = 2 44723 v0.AddArg(src) 44724 v0.AddArg(mem) 44725 v.AddArg(v0) 44726 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) 44727 v1.AddArg(dst) 44728 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 44729 v2.AddArg(src) 44730 v2.AddArg(mem) 44731 v1.AddArg(v2) 44732 v1.AddArg(mem) 44733 v.AddArg(v1) 44734 return true 44735 } 44736 // match: (Move [5] dst src mem) 44737 // cond: 44738 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 44739 for { 44740 if v.AuxInt != 5 { 44741 break 44742 } 44743 _ = v.Args[2] 44744 dst := v.Args[0] 44745 src := v.Args[1] 44746 mem := v.Args[2] 44747 v.reset(OpAMD64MOVBstore) 44748 v.AuxInt = 4 44749 v.AddArg(dst) 44750 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 44751 v0.AuxInt = 4 44752 v0.AddArg(src) 44753 v0.AddArg(mem) 44754 v.AddArg(v0) 44755 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 44756 v1.AddArg(dst) 44757 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44758 v2.AddArg(src) 44759 v2.AddArg(mem) 44760 v1.AddArg(v2) 44761 v1.AddArg(mem) 44762 v.AddArg(v1) 44763 return true 44764 } 44765 // match: (Move [6] dst src mem) 44766 // cond: 44767 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 44768 for { 44769 if v.AuxInt != 6 { 44770 break 44771 } 44772 _ = v.Args[2] 44773 dst := v.Args[0] 44774 src := v.Args[1] 44775 mem := v.Args[2] 44776 v.reset(OpAMD64MOVWstore) 44777 v.AuxInt = 4 44778 v.AddArg(dst) 44779 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 44780 v0.AuxInt = 4 44781 v0.AddArg(src) 44782 v0.AddArg(mem) 44783 v.AddArg(v0) 44784 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 44785 v1.AddArg(dst) 44786 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44787 v2.AddArg(src) 44788 v2.AddArg(mem) 44789 v1.AddArg(v2) 44790 v1.AddArg(mem) 44791 v.AddArg(v1) 44792 return true 44793 } 44794 return false 44795 } 44796 func rewriteValueAMD64_OpMove_10(v *Value) bool { 44797 b := v.Block 44798 _ = b 44799 config := b.Func.Config 44800 _ = config 44801 typ := &b.Func.Config.Types 44802 _ = typ 44803 // match: (Move [7] dst src mem) 44804 // cond: 44805 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 44806 for { 44807 if v.AuxInt != 7 { 44808 break 44809 } 44810 _ = v.Args[2] 44811 dst := v.Args[0] 44812 src := v.Args[1] 44813 mem := v.Args[2] 44814 v.reset(OpAMD64MOVLstore) 44815 v.AuxInt = 3 44816 v.AddArg(dst) 44817 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44818 v0.AuxInt = 3 44819 v0.AddArg(src) 44820 v0.AddArg(mem) 44821 v.AddArg(v0) 44822 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 44823 v1.AddArg(dst) 44824 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44825 v2.AddArg(src) 44826 v2.AddArg(mem) 44827 v1.AddArg(v2) 44828 v1.AddArg(mem) 44829 v.AddArg(v1) 44830 return true 44831 } 44832 // match: (Move [s] dst 
src mem) 44833 // cond: s > 8 && s < 16 44834 // result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 44835 for { 44836 s := v.AuxInt 44837 _ = v.Args[2] 44838 dst := v.Args[0] 44839 src := v.Args[1] 44840 mem := v.Args[2] 44841 if !(s > 8 && s < 16) { 44842 break 44843 } 44844 v.reset(OpAMD64MOVQstore) 44845 v.AuxInt = s - 8 44846 v.AddArg(dst) 44847 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44848 v0.AuxInt = s - 8 44849 v0.AddArg(src) 44850 v0.AddArg(mem) 44851 v.AddArg(v0) 44852 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 44853 v1.AddArg(dst) 44854 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44855 v2.AddArg(src) 44856 v2.AddArg(mem) 44857 v1.AddArg(v2) 44858 v1.AddArg(mem) 44859 v.AddArg(v1) 44860 return true 44861 } 44862 // match: (Move [s] dst src mem) 44863 // cond: s > 16 && s%16 != 0 && s%16 <= 8 44864 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) 44865 for { 44866 s := v.AuxInt 44867 _ = v.Args[2] 44868 dst := v.Args[0] 44869 src := v.Args[1] 44870 mem := v.Args[2] 44871 if !(s > 16 && s%16 != 0 && s%16 <= 8) { 44872 break 44873 } 44874 v.reset(OpMove) 44875 v.AuxInt = s - s%16 44876 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 44877 v0.AuxInt = s % 16 44878 v0.AddArg(dst) 44879 v.AddArg(v0) 44880 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 44881 v1.AuxInt = s % 16 44882 v1.AddArg(src) 44883 v.AddArg(v1) 44884 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 44885 v2.AddArg(dst) 44886 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44887 v3.AddArg(src) 44888 v3.AddArg(mem) 44889 v2.AddArg(v3) 44890 v2.AddArg(mem) 44891 v.AddArg(v2) 44892 return true 44893 } 44894 // match: (Move [s] dst src mem) 44895 // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE 44896 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) 44897 for { 44898 s := v.AuxInt 44899 _ = v.Args[2] 44900 dst := v.Args[0] 44901 src := v.Args[1] 44902 mem := v.Args[2] 44903 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) { 44904 break 44905 } 44906 v.reset(OpMove) 44907 v.AuxInt = s - s%16 44908 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 44909 v0.AuxInt = s % 16 44910 v0.AddArg(dst) 44911 v.AddArg(v0) 44912 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 44913 v1.AuxInt = s % 16 44914 v1.AddArg(src) 44915 v.AddArg(v1) 44916 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 44917 v2.AddArg(dst) 44918 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 44919 v3.AddArg(src) 44920 v3.AddArg(mem) 44921 v2.AddArg(v3) 44922 v2.AddArg(mem) 44923 v.AddArg(v2) 44924 return true 44925 } 44926 // match: (Move [s] dst src mem) 44927 // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE 44928 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))) 44929 for { 44930 s := v.AuxInt 44931 _ = v.Args[2] 44932 dst := v.Args[0] 44933 src := v.Args[1] 44934 mem := v.Args[2] 44935 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) { 44936 break 44937 } 44938 v.reset(OpMove) 44939 v.AuxInt = s - s%16 44940 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 44941 v0.AuxInt = s % 16 44942 v0.AddArg(dst) 44943 v.AddArg(v0) 44944 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 44945 v1.AuxInt = s % 16 44946 v1.AddArg(src) 44947 v.AddArg(v1) 44948 v2 := 
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
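// Editorial note: for bulk copies the rules above choose between an unrolled
// Duff's-device routine (DUFFCOPY, for 16-byte-aligned sizes from 32 to 1024
// bytes; the AuxInt 14*(64-s/16) is the byte offset of the entry point that
// copies exactly s bytes) and REP MOVSQ (REPMOVSQ, moving s/8 quadwords when
// the copy is larger or Duff's device is disabled).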
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
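// Editorial note: Neg32F/Neg64F above negate by XORing the sign bit with PXOR,
// which flips the sign of ±0, NaN and infinities correctly, unlike a subtract
// from zero. A rough equivalent in plain Go (negate64 is a hypothetical
// helper, not part of this package):
//
//	func negate64(x float64) float64 {
//		return math.Float64frombits(math.Float64bits(x) ^ (1 << 63))
//	}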
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
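// Editorial note: the Neq* rules lower a generic comparison to a flag-producing
// CMP (UCOMIS for floats) consumed by a SETcc that materializes the boolean;
// the float variants use SETNEF, which also counts the unordered (NaN) flag
// result as "not equal". NeqPtr checks config.PtrSize so the same rules serve
// the 32-bit-pointer (p32) configuration as well.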
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
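// Editorial note: PopCount16 and PopCount8 zero-extend to 32 bits and reuse
// POPCNTL, so only 32- and 64-bit popcount ops are needed; the widening is
// harmless because the added bits are zero, mirroring the math/bits identity:
//
//	bits.OnesCount8(x) == bits.OnesCount32(uint32(x)) // always true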
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool {
	// match: (RoundToEven x)
	// cond:
	// result: (ROUNDSD [0] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
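// Editorial note: x86 shifts mask their count register, but Go defines an
// unsigned shift by >= width as 0. The Rsh*Ux* rules above repair this without
// a branch: CMPWconst y [16] sets carry iff y < 16, SBBLcarrymask spreads that
// carry into an all-ones (or all-zero) mask, and the AND forces the result to
// zero for oversized counts. The intended semantics in plain Go (rsh16u is a
// hypothetical helper, not part of this package):
//
//	func rsh16u(x uint16, y uint) uint16 {
//		if y >= 16 {
//			return 0 // the SBBLcarrymask AND produces this case branch-free
//		}
//		return x >> y
//	}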
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
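// Editorial note: signed right shifts must instead saturate, since Go defines
// shifting a signed value by >= width as filling with sign bits. When y >= 16
// the NOTL(SBBLcarrymask ...) term above becomes all ones, so the ORL turns y
// into a huge count that SARW clamps to "all sign bits". Equivalent intent in
// plain Go (rsh16s is a hypothetical helper, not part of this package):
//
//	func rsh16s(x int16, y uint) int16 {
//		if y >= 16 {
//			y = 15 // arithmetic shift saturates to the sign bit
//		}
//		return x >> y
//	}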
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x16 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x32 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x64 <t> x y)
	// cond:
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// cond:
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// cond:
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
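// Editorial note: AddTupleFirst32/64 are marker ops meaning "add this value to
// the first element of the tuple". The Select0 rules above perform that
// addition only where the first element is actually consumed, while Select1
// simply passes the untouched second element (memory or flags) through.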
func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSlicemask_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Slicemask <t> x)
	// cond:
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64SARQconst)
		v.AuxInt = 63
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
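// Editorial note: Slicemask needs all ones for a nonzero length and zero for a
// zero length. NEGQ makes the sign bit record "x != 0" for the nonnegative
// inputs this op sees, and SARQconst [63] smears that bit across the word. The
// same branch-free trick in plain Go (slicemask is a hypothetical helper, not
// part of this package):
//
//	func slicemask(x int64) int64 {
//		return (-x) >> 63 // 0 if x == 0, -1 (all ones) if x > 0
//	}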
func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpStore_0(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
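// Editorial note: the Store rules above dispatch on the stored type's size,
// trying the floating-point cases first so that an 8-byte float becomes
// MOVSDstore rather than MOVQstore; rule order is what keeps the float and
// integer cases of equal size apart. The resulting mapping is:
//
//	8 bytes + float64 -> MOVSDstore    4 bytes + float32 -> MOVSSstore
//	8 -> MOVQstore  4 -> MOVLstore  2 -> MOVWstore  1 -> MOVBstore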
func rewriteValueAMD64_OpSub16_0(v *Value) bool {
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32_0(v *Value) bool {
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64_0(v *Value) bool {
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8_0(v *Value) bool {
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpTrunc_0(v *Value) bool {
	// match: (Trunc x)
	// cond:
	// result: (ROUNDSD [3] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 3
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
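// Editorial note: the integer Trunc* rules rewrite to a plain Copy because a
// narrow value already lives in the low bits of its register; consumers that
// need defined high bits re-extend explicitly, so truncation is free. The
// floating-point Trunc earlier instead becomes ROUNDSD with immediate 3
// (round toward zero), just as RoundToEven used ROUNDSD immediate 0 (round to
// nearest even).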
func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpWB_0(v *Value) bool {
	// match: (WB {fn} destptr srcptr mem)
	// cond:
	// result: (LoweredWB {fn} destptr srcptr mem)
	for {
		fn := v.Aux
		_ = v.Args[2]
		destptr := v.Args[0]
		srcptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64LoweredWB)
		v.Aux = fn
		v.AddArg(destptr)
		v.AddArg(srcptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpXor16_0(v *Value) bool {
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32_0(v *Value) bool {
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64_0(v *Value) bool {
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8_0(v *Value) bool {
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpZero_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [0] _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[1]
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// cond:
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// cond:
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// cond:
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// cond:
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// cond:
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// cond:
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
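// Editorial note: the small Zero rules above pack both the stored value (0)
// and the store offset into one AuxInt via makeValAndOff(0, off); Zero [7],
// for instance, becomes two overlapping 4-byte zero stores at offsets 3 and 0,
// the same overlap trick the Move rules use. The [s] rule peels an unaligned
// s%8 remainder with a single 8-byte store and recurses on the aligned rest.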
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, s-8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore destptr (MOVOconst [0]) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(destptr)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v3.AuxInt = 0
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
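	// Note (not generated): in the MOVOstore chains above and below, the
	// innermost store takes the incoming memory state mem and each enclosing
	// store uses the inner store as its memory argument, so the 16-byte zero
	// stores are ordered from offset 0 upward through the SSA memory chain.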
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
	for {
		if v.AuxInt != 48 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 32
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 16
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v5.AddArg(destptr)
		v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v6.AuxInt = 0
		v5.AddArg(v6)
		v5.AddArg(mem)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
	for {
		if v.AuxInt != 64 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 48
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 32
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v6.AuxInt = 16
		v6.AddArg(destptr)
		v5.AddArg(v6)
		v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v7.AuxInt = 0
		v5.AddArg(v7)
		v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v8.AddArg(destptr)
		v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v9.AuxInt = 0
		v8.AddArg(v9)
		v8.AddArg(mem)
		v5.AddArg(v8)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZero_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
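// exampleZeroStrategy (illustrative, not generated) summarizes the size
// dispatch implemented by the Zero rules above, assuming config.useSSE,
// !config.noDuffDevice, and that any unaligned tail has already been peeled
// off by the OffPtr rules:
func exampleZeroStrategy(s int64) string {
	switch {
	case s <= 8:
		return "single constant store" // MOVB/MOVW/MOVL/MOVQstoreconst
	case s < 16:
		return "two overlapping MOVQstoreconst"
	case s <= 64:
		return "unrolled MOVOstore chain" // 16-byte zero stores
	case s <= 1024:
		return "DUFFZERO" // jump into the duffzero routine
	default:
		return "REPSTOSQ" // rep stosq loop
	}
}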
func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteBlockAMD64(b *Block) bool {
	config := b.Func.Config
	_ = config
	fe := b.Func.fe
	_ = fe
	typ := &config.Types
	_ = typ
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (UGE (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
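		// Note (not generated): TEST of a single-bit mask is strength-reduced
		// to a BT instruction above. BT copies the selected bit into CF, so
		// "bit clear" (the EQ outcome of the TEST) becomes the carry-clear
		// block kind UGE; the mirrored NE rules further below use the
		// carry-set kind ULT.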
		// match: (EQ (InvertFlags cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
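	// Note (not generated): the Flag* control values record a statically
	// known comparison outcome; FlagLT_ULT, for example, means "less than"
	// in both the signed and the unsigned order. A conditional block whose
	// outcome is thereby fixed becomes (First nil ...), an unconditional
	// branch, with b.swapSuccessors() used when the "no" edge is the one
	// to take.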
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETL {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETLE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETG cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETG {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQ {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
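		// Note (not generated): the rules below handle the unsigned and
		// floating-point setcc ops. SETB/SETBE/SETA/SETAE test carry-based
		// conditions, hence the unsigned block kinds ULT/ULE/UGT/UGE; the
		// float variants SETGF/SETGEF also map to UGT/UGE because UCOMISS/
		// UCOMISD report float comparisons through CF/ZF the way unsigned
		// integer compares do.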
		// match: (If (SETB cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETB {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETBE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETA cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETA {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETAE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If cond yes no)
		// cond:
		// result: (NE (TESTB cond cond) yes no)
		for {
			v := b.Control
			_ = v
			cond := b.Control
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (ULT (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
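		// Note (not generated): the isPowerOfTwo/log2 guards above ensure c
		// has exactly one set bit, so log2(c) is the index of the bit that
		// BTLconst/BTQconst tests. A minimal equivalent of that check:
		//
		//	func bitIndex(c int64) (idx int64, ok bool) {
		//		if c <= 0 || c&(c-1) != 0 {
		//			return 0, false // zero or more than one bit set
		//		}
		//		for c > 1 {
		//			c >>= 1
		//			idx++
		//		}
		//		return idx, true
		//	}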
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	}
	return false
}
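// exampleRewriteToFixpoint (illustrative, not generated): the compiler's
// rewrite pass applies the functions in this file repeatedly until no rule
// fires; a minimal driver along those lines looks like:
func exampleRewriteToFixpoint(f *Func) {
	for change := true; change; {
		change = false
		for _, b := range f.Blocks {
			if rewriteBlockAMD64(b) {
				change = true
			}
			for _, v := range b.Values {
				if rewriteValueAMD64(v) {
					change = true
				}
			}
		}
	}
}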