github.com/karrick/go@v0.0.0-20170817181416-d5b0ec858b37/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
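// The switch above is a generated dispatch table: every SSA op that has
// at least one rewrite rule in gen/AMD64.rules gets a case forwarding to
// helper functions below. Rules for an op are emitted in chunks of ten
// (the _0, _10, _20, ... suffixes), apparently to keep each generated
// function a manageable size; the || chain tries each chunk in order and
// reports whether any rule rewrote the value in place.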
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
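// The four SHLLconst/SHR*const pairs matched above are the standard
// rotate idiom: for 0 < c < 32, x<<c | x>>(32-c) feeds the bits shifted
// out of the top back into the bottom, i.e. a rotate left by c. The two
// shifted values occupy disjoint bit positions, so ADD behaves like OR
// here, which is why the pattern can be recognized under ADDL at all.
// A minimal sketch of the 32-bit identity in plain Go (rotl32 is an
// illustrative helper, not part of the generated code):
func rotl32(x uint32, c uint) uint32 {
	// Assumes 0 < c < 32, matching the d == 32-c condition above.
	return x<<c | x>>(32-c)
}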
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
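// The two rules above fold a load into the add, rewriting
// ADDL x (MOVLload [off] {sym} ptr mem) to ADDLmem, the x86 form whose
// second operand comes straight from memory. canMergeLoad reports
// whether the load can safely be absorbed into this instruction, and
// clobber marks the now-dead load so it is removed. Both operand orders
// are matched because ADDL is commutative.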
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
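// The SHLQconst rules above use x86 scaled addressing: a shift by 1, 2,
// or 3 is a multiply by 2, 4, or 8, so x + y<<k collapses into a single
// LEAQ2/LEAQ4/LEAQ8 instead of a shift plus an add. The arithmetic the
// LEAQ8 form computes, in plain Go (leaq8 is an illustrative helper,
// not part of the generated code):
func leaq8(x, y uint64) uint64 {
	return x + y<<3 // one LEAQ (x)(y*8) instruction on amd64
}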
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
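// Note the is32Bit(c+d) guards above: amd64 immediates and addressing
// displacements are signed 32-bit fields, so two constants can only be
// folded into a single LEAQ* or ADDQconst when their sum still fits in
// 32 bits. The (ADDQconst [c] (MOVQconst [d])) rule needs no guard
// because its result is materialized as a full 64-bit constant.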
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
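// Masking with 0xFF or 0xFFFF computes the same value as zero-extending
// the low byte or word, so the ANDLconst rules above substitute the
// dedicated MOVBQZX/MOVWQZX zero-extension instructions for the AND.
// The identity in plain Go (maskByte is an illustrative helper, not
// part of the generated code):
func maskByte(x uint32) uint32 {
	return uint32(uint8(x)) // == x & 0xFF
}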
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
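// Illustrative note (not a generated rule; the hardware rationale is an
// assumption, not stated in this file): the is32Bit guard on the ANDQ rules
// above reflects that the 64-bit AND instruction takes at most a
// sign-extended 32-bit immediate. Assuming is32Bit(c) means
// c == int64(int32(c)), the boundary behaves like
//
//	is32Bit(0x7FFFFFFF) == true   // folds to ANDQconst
//	is32Bit(0x80000000) == false  // would sign-extend to 0xFFFFFFFF80000000
//
// so wider masks stay materialized in a register via MOVQconst.
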
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
	// match: (BTQconst [c] x)
	// cond: c < 32
	// result: (BTLconst [c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
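// Illustrative example (a concrete instance of the second CMPB rule above,
// not a new rule): comparisons are canonicalized so the constant sits on the
// right. When it starts on the left, the operands must swap, and the result
// is wrapped in InvertFlags to record that the flag meaning is reversed:
//
//	(CMPB (MOVLconst [7]) x) -> (InvertFlags (CMPBconst x [7]))
//
// Later flag-consuming rules then only need to handle the CMPBconst form.
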
func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
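// Illustrative worked example for the constant-vs-constant rules above
// (assumed AuxInt semantics, not itself a generated rule): for x = 5 and
// y = -3 we have int8(5) > int8(-3) but uint8(5) < uint8(-3) = 253, so
//
//	(CMPBconst (MOVLconst [5]) [-3]) -> (FlagGT_ULT)
//
// i.e. the signed and unsigned outcomes are recorded separately. The final
// rule rewrites a compare against zero as (TESTB x x), which produces the
// same flags with a shorter encoding.
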
func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	return false
}
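// Illustrative note (a concrete instance of the zero-extension rules above,
// assuming their stated conditions): a zero-extended byte can be at most
// 0xFF, so for any constant c with 0xFF < c, e.g. c = 256,
//
//	(CMPQconst (MOVBQZX _) [256]) -> (FlagLT_ULT)
//
// is decidable at compile time; the value is both signed- and
// unsigned-less-than the constant no matter what the byte holds. The
// MOVWQZX and MOVLQZX cases are the same argument with wider bounds.
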
func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
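// Illustrative worked example for the SHRQconst rule above (not a new rule):
// after a logical right shift by c, the result is strictly less than
// 1<<(64-c). With c = 56 and n = 256, 1<<(64-56) = 256 <= uint64(256), so
//
//	(CMPQconst (SHRQconst _ [56]) [256]) -> (FlagLT_ULT)
//
// holds for every possible shifted value, and the compare folds away.
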
func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPW x (MOVLconst [c]))
	// cond:
	// result: (CMPWconst x [int64(int16(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int64(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
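// Illustrative note (the displacement-width rationale is an assumption, not
// stated in this file): like most memory ops here, the CMPXCHG rules above
// fold an ADDQconst into the instruction's displacement,
//
//	(CMPXCHGQlock [16] {sym} (ADDQconst [24] ptr) old new_ mem)
//	  -> (CMPXCHGQlock [40] {sym} ptr old new_ mem)
//
// guarded by is32Bit(off1+off2) because x86-64 addressing modes carry only a
// signed 32-bit displacement; a sum that overflows int32 leaves the
// ADDQconst in place.
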
func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
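// Illustrative example (concrete instances of the first two LEAQ rules
// above, not new rules): constant offsets accumulate in AuxInt, and a plain
// ADDQ upgrades the LEAQ to the two-register LEAQ1 form, provided neither
// operand is the static base pointer SB:
//
//	(LEAQ [8] {s} (ADDQconst [16] x)) -> (LEAQ [24] {s} x)
//	(LEAQ [c] {s} (ADDQ x y))         -> (LEAQ1 [c] {s} x y)
//
// The remaining rules then merge a LEAQ feeding any LEAQx into a single
// instruction by summing offsets and merging symbols.
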
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
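// Illustrative worked equation for the second LEAQ2 rule above (not a new
// rule): LEAQ2 computes c + x + 2*y, so substituting y = y' + d gives
//
//	c + x + 2*(y'+d) = (c + 2*d) + x + 2*y'
//
// which is why the ADDQconst folding on the index operand produces [c+2*d]
// rather than [c+d]; the LEAQ4 and LEAQ8 variants below scale the same way
// by 4 and 8.
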
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
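// Illustrative note for the (MOVBQSX (ANDLconst [c] x)) rule above (a
// concrete instance, not a new rule): when c & 0x80 == 0 the mask clears
// the byte's sign bit, and sign-extending a byte whose bit 7 is zero is the
// identity. With c = 0x7F, for example, the pair collapses to
//
//	(MOVBQSX (ANDLconst [0x7F] x)) -> (ANDLconst [0x7F] x)
//
// and masking with c & 0x7f keeps the "sign bit is clear" invariant
// explicit for later rules.
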
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
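// Illustrative example (a concrete instance of the first MOVBload rule
// above): a byte load that can see a byte store to the same address is
// forwarded from the stored register instead of going through memory,
//
//	(MOVBload [off] {sym} ptr (MOVBstore [off] {sym} ptr x _)) -> (MOVBQZX x)
//
// with the zero extension preserving the load's guarantee that the upper
// bits of the result are zero.
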
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
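// Illustrative note: MOVBloadidx1 addresses ptr+idx+AuxInt, and the ptr+idx
// sum is symmetric, so the four rules above are really one rewrite spelled
// out for each argument order: an ADDQconst on either operand folds its
// constant into AuxInt, e.g.
//
//	(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
//	  -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
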
{sym1} (LEAQ [off2] {sym2} base) val mem) 4926 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4927 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4928 for { 4929 off1 := v.AuxInt 4930 sym1 := v.Aux 4931 _ = v.Args[2] 4932 v_0 := v.Args[0] 4933 if v_0.Op != OpAMD64LEAQ { 4934 break 4935 } 4936 off2 := v_0.AuxInt 4937 sym2 := v_0.Aux 4938 base := v_0.Args[0] 4939 val := v.Args[1] 4940 mem := v.Args[2] 4941 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4942 break 4943 } 4944 v.reset(OpAMD64MOVBstore) 4945 v.AuxInt = off1 + off2 4946 v.Aux = mergeSym(sym1, sym2) 4947 v.AddArg(base) 4948 v.AddArg(val) 4949 v.AddArg(mem) 4950 return true 4951 } 4952 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 4953 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4954 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 4955 for { 4956 off1 := v.AuxInt 4957 sym1 := v.Aux 4958 _ = v.Args[2] 4959 v_0 := v.Args[0] 4960 if v_0.Op != OpAMD64LEAQ1 { 4961 break 4962 } 4963 off2 := v_0.AuxInt 4964 sym2 := v_0.Aux 4965 _ = v_0.Args[1] 4966 ptr := v_0.Args[0] 4967 idx := v_0.Args[1] 4968 val := v.Args[1] 4969 mem := v.Args[2] 4970 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4971 break 4972 } 4973 v.reset(OpAMD64MOVBstoreidx1) 4974 v.AuxInt = off1 + off2 4975 v.Aux = mergeSym(sym1, sym2) 4976 v.AddArg(ptr) 4977 v.AddArg(idx) 4978 v.AddArg(val) 4979 v.AddArg(mem) 4980 return true 4981 } 4982 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 4983 // cond: ptr.Op != OpSB 4984 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 4985 for { 4986 off := v.AuxInt 4987 sym := v.Aux 4988 _ = v.Args[2] 4989 v_0 := v.Args[0] 4990 if v_0.Op != OpAMD64ADDQ { 4991 break 4992 } 4993 _ = v_0.Args[1] 4994 ptr := v_0.Args[0] 4995 idx := v_0.Args[1] 4996 val := v.Args[1] 4997 mem := v.Args[2] 4998 if !(ptr.Op != OpSB) { 4999 break 5000 } 5001 v.reset(OpAMD64MOVBstoreidx1) 5002 v.AuxInt = off 5003 v.Aux = sym 5004 v.AddArg(ptr) 5005 v.AddArg(idx) 5006 v.AddArg(val) 5007 v.AddArg(mem) 5008 return true 5009 } 5010 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 5011 // cond: x0.Uses == 1 && clobber(x0) 5012 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 5013 for { 5014 i := v.AuxInt 5015 s := v.Aux 5016 _ = v.Args[2] 5017 p := v.Args[0] 5018 w := v.Args[1] 5019 x0 := v.Args[2] 5020 if x0.Op != OpAMD64MOVBstore { 5021 break 5022 } 5023 if x0.AuxInt != i-1 { 5024 break 5025 } 5026 if x0.Aux != s { 5027 break 5028 } 5029 _ = x0.Args[2] 5030 if p != x0.Args[0] { 5031 break 5032 } 5033 x0_1 := x0.Args[1] 5034 if x0_1.Op != OpAMD64SHRWconst { 5035 break 5036 } 5037 if x0_1.AuxInt != 8 { 5038 break 5039 } 5040 if w != x0_1.Args[0] { 5041 break 5042 } 5043 mem := x0.Args[2] 5044 if !(x0.Uses == 1 && clobber(x0)) { 5045 break 5046 } 5047 v.reset(OpAMD64MOVWstore) 5048 v.AuxInt = i - 1 5049 v.Aux = s 5050 v.AddArg(p) 5051 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 5052 v0.AuxInt = 8 5053 v0.AddArg(w) 5054 v.AddArg(v0) 5055 v.AddArg(mem) 5056 return true 5057 } 5058 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 5059 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 5060 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 5061 for { 5062 i := v.AuxInt 5063 s := v.Aux 5064 _ = v.Args[2] 5065 p := 
v.Args[0] 5066 w := v.Args[1] 5067 x2 := v.Args[2] 5068 if x2.Op != OpAMD64MOVBstore { 5069 break 5070 } 5071 if x2.AuxInt != i-1 { 5072 break 5073 } 5074 if x2.Aux != s { 5075 break 5076 } 5077 _ = x2.Args[2] 5078 if p != x2.Args[0] { 5079 break 5080 } 5081 x2_1 := x2.Args[1] 5082 if x2_1.Op != OpAMD64SHRLconst { 5083 break 5084 } 5085 if x2_1.AuxInt != 8 { 5086 break 5087 } 5088 if w != x2_1.Args[0] { 5089 break 5090 } 5091 x1 := x2.Args[2] 5092 if x1.Op != OpAMD64MOVBstore { 5093 break 5094 } 5095 if x1.AuxInt != i-2 { 5096 break 5097 } 5098 if x1.Aux != s { 5099 break 5100 } 5101 _ = x1.Args[2] 5102 if p != x1.Args[0] { 5103 break 5104 } 5105 x1_1 := x1.Args[1] 5106 if x1_1.Op != OpAMD64SHRLconst { 5107 break 5108 } 5109 if x1_1.AuxInt != 16 { 5110 break 5111 } 5112 if w != x1_1.Args[0] { 5113 break 5114 } 5115 x0 := x1.Args[2] 5116 if x0.Op != OpAMD64MOVBstore { 5117 break 5118 } 5119 if x0.AuxInt != i-3 { 5120 break 5121 } 5122 if x0.Aux != s { 5123 break 5124 } 5125 _ = x0.Args[2] 5126 if p != x0.Args[0] { 5127 break 5128 } 5129 x0_1 := x0.Args[1] 5130 if x0_1.Op != OpAMD64SHRLconst { 5131 break 5132 } 5133 if x0_1.AuxInt != 24 { 5134 break 5135 } 5136 if w != x0_1.Args[0] { 5137 break 5138 } 5139 mem := x0.Args[2] 5140 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 5141 break 5142 } 5143 v.reset(OpAMD64MOVLstore) 5144 v.AuxInt = i - 3 5145 v.Aux = s 5146 v.AddArg(p) 5147 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 5148 v0.AddArg(w) 5149 v.AddArg(v0) 5150 v.AddArg(mem) 5151 return true 5152 } 5153 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 5154 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 5155 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 5156 for { 5157 i := v.AuxInt 5158 s := v.Aux 5159 _ = v.Args[2] 5160 p := v.Args[0] 5161 w := v.Args[1] 5162 x6 := v.Args[2] 5163 if x6.Op != OpAMD64MOVBstore { 5164 break 5165 } 5166 if x6.AuxInt != i-1 { 5167 break 5168 } 5169 if x6.Aux != s { 5170 break 5171 } 5172 _ = x6.Args[2] 5173 if p != x6.Args[0] { 5174 break 5175 } 5176 x6_1 := x6.Args[1] 5177 if x6_1.Op != OpAMD64SHRQconst { 5178 break 5179 } 5180 if x6_1.AuxInt != 8 { 5181 break 5182 } 5183 if w != x6_1.Args[0] { 5184 break 5185 } 5186 x5 := x6.Args[2] 5187 if x5.Op != OpAMD64MOVBstore { 5188 break 5189 } 5190 if x5.AuxInt != i-2 { 5191 break 5192 } 5193 if x5.Aux != s { 5194 break 5195 } 5196 _ = x5.Args[2] 5197 if p != x5.Args[0] { 5198 break 5199 } 5200 x5_1 := x5.Args[1] 5201 if x5_1.Op != OpAMD64SHRQconst { 5202 break 5203 } 5204 if x5_1.AuxInt != 16 { 5205 break 5206 } 5207 if w != x5_1.Args[0] { 5208 break 5209 } 5210 x4 := x5.Args[2] 5211 if x4.Op != OpAMD64MOVBstore { 5212 break 5213 } 5214 if x4.AuxInt != i-3 { 5215 break 5216 } 5217 if x4.Aux != s { 5218 break 5219 } 5220 _ = x4.Args[2] 5221 if p != x4.Args[0] { 5222 break 5223 } 5224 x4_1 := x4.Args[1] 5225 if x4_1.Op != OpAMD64SHRQconst { 5226 break 5227 } 5228 if x4_1.AuxInt != 24 { 5229 break 5230 } 5231 if w != x4_1.Args[0] { 5232 break 5233 } 5234 x3 := x4.Args[2] 
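// Note: at this point the matcher has already accepted the one-byte stores at
// offsets i, i-1, i-2, and i-3 (v, x6, x5, and x4, holding w, w>>8, w>>16, and
// w>>24 respectively); it keeps walking the mem chain through x3 down to x0
// below, checking the remaining shift amounts, before rewriting the whole
// chain at once.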
5235 if x3.Op != OpAMD64MOVBstore { 5236 break 5237 } 5238 if x3.AuxInt != i-4 { 5239 break 5240 } 5241 if x3.Aux != s { 5242 break 5243 } 5244 _ = x3.Args[2] 5245 if p != x3.Args[0] { 5246 break 5247 } 5248 x3_1 := x3.Args[1] 5249 if x3_1.Op != OpAMD64SHRQconst { 5250 break 5251 } 5252 if x3_1.AuxInt != 32 { 5253 break 5254 } 5255 if w != x3_1.Args[0] { 5256 break 5257 } 5258 x2 := x3.Args[2] 5259 if x2.Op != OpAMD64MOVBstore { 5260 break 5261 } 5262 if x2.AuxInt != i-5 { 5263 break 5264 } 5265 if x2.Aux != s { 5266 break 5267 } 5268 _ = x2.Args[2] 5269 if p != x2.Args[0] { 5270 break 5271 } 5272 x2_1 := x2.Args[1] 5273 if x2_1.Op != OpAMD64SHRQconst { 5274 break 5275 } 5276 if x2_1.AuxInt != 40 { 5277 break 5278 } 5279 if w != x2_1.Args[0] { 5280 break 5281 } 5282 x1 := x2.Args[2] 5283 if x1.Op != OpAMD64MOVBstore { 5284 break 5285 } 5286 if x1.AuxInt != i-6 { 5287 break 5288 } 5289 if x1.Aux != s { 5290 break 5291 } 5292 _ = x1.Args[2] 5293 if p != x1.Args[0] { 5294 break 5295 } 5296 x1_1 := x1.Args[1] 5297 if x1_1.Op != OpAMD64SHRQconst { 5298 break 5299 } 5300 if x1_1.AuxInt != 48 { 5301 break 5302 } 5303 if w != x1_1.Args[0] { 5304 break 5305 } 5306 x0 := x1.Args[2] 5307 if x0.Op != OpAMD64MOVBstore { 5308 break 5309 } 5310 if x0.AuxInt != i-7 { 5311 break 5312 } 5313 if x0.Aux != s { 5314 break 5315 } 5316 _ = x0.Args[2] 5317 if p != x0.Args[0] { 5318 break 5319 } 5320 x0_1 := x0.Args[1] 5321 if x0_1.Op != OpAMD64SHRQconst { 5322 break 5323 } 5324 if x0_1.AuxInt != 56 { 5325 break 5326 } 5327 if w != x0_1.Args[0] { 5328 break 5329 } 5330 mem := x0.Args[2] 5331 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 5332 break 5333 } 5334 v.reset(OpAMD64MOVQstore) 5335 v.AuxInt = i - 7 5336 v.Aux = s 5337 v.AddArg(p) 5338 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 5339 v0.AddArg(w) 5340 v.AddArg(v0) 5341 v.AddArg(mem) 5342 return true 5343 } 5344 return false 5345 } 5346 func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { 5347 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 5348 // cond: x.Uses == 1 && clobber(x) 5349 // result: (MOVWstore [i-1] {s} p w mem) 5350 for { 5351 i := v.AuxInt 5352 s := v.Aux 5353 _ = v.Args[2] 5354 p := v.Args[0] 5355 v_1 := v.Args[1] 5356 if v_1.Op != OpAMD64SHRQconst { 5357 break 5358 } 5359 if v_1.AuxInt != 8 { 5360 break 5361 } 5362 w := v_1.Args[0] 5363 x := v.Args[2] 5364 if x.Op != OpAMD64MOVBstore { 5365 break 5366 } 5367 if x.AuxInt != i-1 { 5368 break 5369 } 5370 if x.Aux != s { 5371 break 5372 } 5373 _ = x.Args[2] 5374 if p != x.Args[0] { 5375 break 5376 } 5377 if w != x.Args[1] { 5378 break 5379 } 5380 mem := x.Args[2] 5381 if !(x.Uses == 1 && clobber(x)) { 5382 break 5383 } 5384 v.reset(OpAMD64MOVWstore) 5385 v.AuxInt = i - 1 5386 v.Aux = s 5387 v.AddArg(p) 5388 v.AddArg(w) 5389 v.AddArg(mem) 5390 return true 5391 } 5392 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 5393 // cond: x.Uses == 1 && clobber(x) 5394 // result: (MOVWstore [i-1] {s} p w0 mem) 5395 for { 5396 i := v.AuxInt 5397 s := v.Aux 5398 _ = v.Args[2] 5399 p := v.Args[0] 5400 v_1 := v.Args[1] 5401 if v_1.Op != OpAMD64SHRQconst { 5402 break 5403 } 5404 j := v_1.AuxInt 5405 w := v_1.Args[0] 5406 x := v.Args[2] 5407 if x.Op != OpAMD64MOVBstore { 5408 break 5409 } 5410 if x.AuxInt != i-1 { 5411 break 5412 } 5413 if 
x.Aux != s { 5414 break 5415 } 5416 _ = x.Args[2] 5417 if p != x.Args[0] { 5418 break 5419 } 5420 w0 := x.Args[1] 5421 if w0.Op != OpAMD64SHRQconst { 5422 break 5423 } 5424 if w0.AuxInt != j-8 { 5425 break 5426 } 5427 if w != w0.Args[0] { 5428 break 5429 } 5430 mem := x.Args[2] 5431 if !(x.Uses == 1 && clobber(x)) { 5432 break 5433 } 5434 v.reset(OpAMD64MOVWstore) 5435 v.AuxInt = i - 1 5436 v.Aux = s 5437 v.AddArg(p) 5438 v.AddArg(w0) 5439 v.AddArg(mem) 5440 return true 5441 } 5442 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 5443 // cond: canMergeSym(sym1, sym2) 5444 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5445 for { 5446 off1 := v.AuxInt 5447 sym1 := v.Aux 5448 _ = v.Args[2] 5449 v_0 := v.Args[0] 5450 if v_0.Op != OpAMD64LEAL { 5451 break 5452 } 5453 off2 := v_0.AuxInt 5454 sym2 := v_0.Aux 5455 base := v_0.Args[0] 5456 val := v.Args[1] 5457 mem := v.Args[2] 5458 if !(canMergeSym(sym1, sym2)) { 5459 break 5460 } 5461 v.reset(OpAMD64MOVBstore) 5462 v.AuxInt = off1 + off2 5463 v.Aux = mergeSym(sym1, sym2) 5464 v.AddArg(base) 5465 v.AddArg(val) 5466 v.AddArg(mem) 5467 return true 5468 } 5469 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 5470 // cond: is32Bit(off1+off2) 5471 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 5472 for { 5473 off1 := v.AuxInt 5474 sym := v.Aux 5475 _ = v.Args[2] 5476 v_0 := v.Args[0] 5477 if v_0.Op != OpAMD64ADDLconst { 5478 break 5479 } 5480 off2 := v_0.AuxInt 5481 ptr := v_0.Args[0] 5482 val := v.Args[1] 5483 mem := v.Args[2] 5484 if !(is32Bit(off1 + off2)) { 5485 break 5486 } 5487 v.reset(OpAMD64MOVBstore) 5488 v.AuxInt = off1 + off2 5489 v.Aux = sym 5490 v.AddArg(ptr) 5491 v.AddArg(val) 5492 v.AddArg(mem) 5493 return true 5494 } 5495 return false 5496 } 5497 func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { 5498 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 5499 // cond: ValAndOff(sc).canAdd(off) 5500 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5501 for { 5502 sc := v.AuxInt 5503 s := v.Aux 5504 _ = v.Args[1] 5505 v_0 := v.Args[0] 5506 if v_0.Op != OpAMD64ADDQconst { 5507 break 5508 } 5509 off := v_0.AuxInt 5510 ptr := v_0.Args[0] 5511 mem := v.Args[1] 5512 if !(ValAndOff(sc).canAdd(off)) { 5513 break 5514 } 5515 v.reset(OpAMD64MOVBstoreconst) 5516 v.AuxInt = ValAndOff(sc).add(off) 5517 v.Aux = s 5518 v.AddArg(ptr) 5519 v.AddArg(mem) 5520 return true 5521 } 5522 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 5523 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5524 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5525 for { 5526 sc := v.AuxInt 5527 sym1 := v.Aux 5528 _ = v.Args[1] 5529 v_0 := v.Args[0] 5530 if v_0.Op != OpAMD64LEAQ { 5531 break 5532 } 5533 off := v_0.AuxInt 5534 sym2 := v_0.Aux 5535 ptr := v_0.Args[0] 5536 mem := v.Args[1] 5537 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5538 break 5539 } 5540 v.reset(OpAMD64MOVBstoreconst) 5541 v.AuxInt = ValAndOff(sc).add(off) 5542 v.Aux = mergeSym(sym1, sym2) 5543 v.AddArg(ptr) 5544 v.AddArg(mem) 5545 return true 5546 } 5547 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 5548 // cond: canMergeSym(sym1, sym2) 5549 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5550 for { 5551 x := v.AuxInt 5552 sym1 := v.Aux 5553 _ = v.Args[1] 5554 v_0 := v.Args[0] 5555 if v_0.Op != OpAMD64LEAQ1 { 5556 break 5557 } 5558 off := v_0.AuxInt 5559 
sym2 := v_0.Aux 5560 _ = v_0.Args[1] 5561 ptr := v_0.Args[0] 5562 idx := v_0.Args[1] 5563 mem := v.Args[1] 5564 if !(canMergeSym(sym1, sym2)) { 5565 break 5566 } 5567 v.reset(OpAMD64MOVBstoreconstidx1) 5568 v.AuxInt = ValAndOff(x).add(off) 5569 v.Aux = mergeSym(sym1, sym2) 5570 v.AddArg(ptr) 5571 v.AddArg(idx) 5572 v.AddArg(mem) 5573 return true 5574 } 5575 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 5576 // cond: 5577 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 5578 for { 5579 x := v.AuxInt 5580 sym := v.Aux 5581 _ = v.Args[1] 5582 v_0 := v.Args[0] 5583 if v_0.Op != OpAMD64ADDQ { 5584 break 5585 } 5586 _ = v_0.Args[1] 5587 ptr := v_0.Args[0] 5588 idx := v_0.Args[1] 5589 mem := v.Args[1] 5590 v.reset(OpAMD64MOVBstoreconstidx1) 5591 v.AuxInt = x 5592 v.Aux = sym 5593 v.AddArg(ptr) 5594 v.AddArg(idx) 5595 v.AddArg(mem) 5596 return true 5597 } 5598 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 5599 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 5600 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 5601 for { 5602 c := v.AuxInt 5603 s := v.Aux 5604 _ = v.Args[1] 5605 p := v.Args[0] 5606 x := v.Args[1] 5607 if x.Op != OpAMD64MOVBstoreconst { 5608 break 5609 } 5610 a := x.AuxInt 5611 if x.Aux != s { 5612 break 5613 } 5614 _ = x.Args[1] 5615 if p != x.Args[0] { 5616 break 5617 } 5618 mem := x.Args[1] 5619 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 5620 break 5621 } 5622 v.reset(OpAMD64MOVWstoreconst) 5623 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 5624 v.Aux = s 5625 v.AddArg(p) 5626 v.AddArg(mem) 5627 return true 5628 } 5629 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 5630 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5631 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5632 for { 5633 sc := v.AuxInt 5634 sym1 := v.Aux 5635 _ = v.Args[1] 5636 v_0 := v.Args[0] 5637 if v_0.Op != OpAMD64LEAL { 5638 break 5639 } 5640 off := v_0.AuxInt 5641 sym2 := v_0.Aux 5642 ptr := v_0.Args[0] 5643 mem := v.Args[1] 5644 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5645 break 5646 } 5647 v.reset(OpAMD64MOVBstoreconst) 5648 v.AuxInt = ValAndOff(sc).add(off) 5649 v.Aux = mergeSym(sym1, sym2) 5650 v.AddArg(ptr) 5651 v.AddArg(mem) 5652 return true 5653 } 5654 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 5655 // cond: ValAndOff(sc).canAdd(off) 5656 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5657 for { 5658 sc := v.AuxInt 5659 s := v.Aux 5660 _ = v.Args[1] 5661 v_0 := v.Args[0] 5662 if v_0.Op != OpAMD64ADDLconst { 5663 break 5664 } 5665 off := v_0.AuxInt 5666 ptr := v_0.Args[0] 5667 mem := v.Args[1] 5668 if !(ValAndOff(sc).canAdd(off)) { 5669 break 5670 } 5671 v.reset(OpAMD64MOVBstoreconst) 5672 v.AuxInt = ValAndOff(sc).add(off) 5673 v.Aux = s 5674 v.AddArg(ptr) 5675 v.AddArg(mem) 5676 return true 5677 } 5678 return false 5679 } 5680 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { 5681 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 5682 // cond: 5683 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5684 for { 5685 x := v.AuxInt 5686 sym := v.Aux 5687 _ = v.Args[2] 5688 v_0 := v.Args[0] 5689 if v_0.Op != OpAMD64ADDQconst { 5690 break 5691 } 5692 c := v_0.AuxInt 5693 ptr := v_0.Args[0] 5694 idx 
:= v.Args[1] 5695 mem := v.Args[2] 5696 v.reset(OpAMD64MOVBstoreconstidx1) 5697 v.AuxInt = ValAndOff(x).add(c) 5698 v.Aux = sym 5699 v.AddArg(ptr) 5700 v.AddArg(idx) 5701 v.AddArg(mem) 5702 return true 5703 } 5704 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 5705 // cond: 5706 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 5707 for { 5708 x := v.AuxInt 5709 sym := v.Aux 5710 _ = v.Args[2] 5711 ptr := v.Args[0] 5712 v_1 := v.Args[1] 5713 if v_1.Op != OpAMD64ADDQconst { 5714 break 5715 } 5716 c := v_1.AuxInt 5717 idx := v_1.Args[0] 5718 mem := v.Args[2] 5719 v.reset(OpAMD64MOVBstoreconstidx1) 5720 v.AuxInt = ValAndOff(x).add(c) 5721 v.Aux = sym 5722 v.AddArg(ptr) 5723 v.AddArg(idx) 5724 v.AddArg(mem) 5725 return true 5726 } 5727 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 5728 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 5729 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 5730 for { 5731 c := v.AuxInt 5732 s := v.Aux 5733 _ = v.Args[2] 5734 p := v.Args[0] 5735 i := v.Args[1] 5736 x := v.Args[2] 5737 if x.Op != OpAMD64MOVBstoreconstidx1 { 5738 break 5739 } 5740 a := x.AuxInt 5741 if x.Aux != s { 5742 break 5743 } 5744 _ = x.Args[2] 5745 if p != x.Args[0] { 5746 break 5747 } 5748 if i != x.Args[1] { 5749 break 5750 } 5751 mem := x.Args[2] 5752 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 5753 break 5754 } 5755 v.reset(OpAMD64MOVWstoreconstidx1) 5756 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 5757 v.Aux = s 5758 v.AddArg(p) 5759 v.AddArg(i) 5760 v.AddArg(mem) 5761 return true 5762 } 5763 return false 5764 } 5765 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { 5766 b := v.Block 5767 _ = b 5768 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 5769 // cond: 5770 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 5771 for { 5772 c := v.AuxInt 5773 sym := v.Aux 5774 _ = v.Args[3] 5775 v_0 := v.Args[0] 5776 if v_0.Op != OpAMD64ADDQconst { 5777 break 5778 } 5779 d := v_0.AuxInt 5780 ptr := v_0.Args[0] 5781 idx := v.Args[1] 5782 val := v.Args[2] 5783 mem := v.Args[3] 5784 v.reset(OpAMD64MOVBstoreidx1) 5785 v.AuxInt = c + d 5786 v.Aux = sym 5787 v.AddArg(ptr) 5788 v.AddArg(idx) 5789 v.AddArg(val) 5790 v.AddArg(mem) 5791 return true 5792 } 5793 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 5794 // cond: 5795 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 5796 for { 5797 c := v.AuxInt 5798 sym := v.Aux 5799 _ = v.Args[3] 5800 ptr := v.Args[0] 5801 v_1 := v.Args[1] 5802 if v_1.Op != OpAMD64ADDQconst { 5803 break 5804 } 5805 d := v_1.AuxInt 5806 idx := v_1.Args[0] 5807 val := v.Args[2] 5808 mem := v.Args[3] 5809 v.reset(OpAMD64MOVBstoreidx1) 5810 v.AuxInt = c + d 5811 v.Aux = sym 5812 v.AddArg(ptr) 5813 v.AddArg(idx) 5814 v.AddArg(val) 5815 v.AddArg(mem) 5816 return true 5817 } 5818 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) 5819 // cond: x0.Uses == 1 && clobber(x0) 5820 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem) 5821 for { 5822 i := v.AuxInt 5823 s := v.Aux 5824 _ = v.Args[3] 5825 p := v.Args[0] 5826 idx := v.Args[1] 5827 w := v.Args[2] 5828 x0 := v.Args[3] 5829 if x0.Op != OpAMD64MOVBstoreidx1 { 5830 break 5831 } 5832 if x0.AuxInt != i-1 { 5833 break 5834 } 5835 if 
x0.Aux != s { 5836 break 5837 } 5838 _ = x0.Args[3] 5839 if p != x0.Args[0] { 5840 break 5841 } 5842 if idx != x0.Args[1] { 5843 break 5844 } 5845 x0_2 := x0.Args[2] 5846 if x0_2.Op != OpAMD64SHRWconst { 5847 break 5848 } 5849 if x0_2.AuxInt != 8 { 5850 break 5851 } 5852 if w != x0_2.Args[0] { 5853 break 5854 } 5855 mem := x0.Args[3] 5856 if !(x0.Uses == 1 && clobber(x0)) { 5857 break 5858 } 5859 v.reset(OpAMD64MOVWstoreidx1) 5860 v.AuxInt = i - 1 5861 v.Aux = s 5862 v.AddArg(p) 5863 v.AddArg(idx) 5864 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 5865 v0.AuxInt = 8 5866 v0.AddArg(w) 5867 v.AddArg(v0) 5868 v.AddArg(mem) 5869 return true 5870 } 5871 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) 5872 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 5873 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem) 5874 for { 5875 i := v.AuxInt 5876 s := v.Aux 5877 _ = v.Args[3] 5878 p := v.Args[0] 5879 idx := v.Args[1] 5880 w := v.Args[2] 5881 x2 := v.Args[3] 5882 if x2.Op != OpAMD64MOVBstoreidx1 { 5883 break 5884 } 5885 if x2.AuxInt != i-1 { 5886 break 5887 } 5888 if x2.Aux != s { 5889 break 5890 } 5891 _ = x2.Args[3] 5892 if p != x2.Args[0] { 5893 break 5894 } 5895 if idx != x2.Args[1] { 5896 break 5897 } 5898 x2_2 := x2.Args[2] 5899 if x2_2.Op != OpAMD64SHRLconst { 5900 break 5901 } 5902 if x2_2.AuxInt != 8 { 5903 break 5904 } 5905 if w != x2_2.Args[0] { 5906 break 5907 } 5908 x1 := x2.Args[3] 5909 if x1.Op != OpAMD64MOVBstoreidx1 { 5910 break 5911 } 5912 if x1.AuxInt != i-2 { 5913 break 5914 } 5915 if x1.Aux != s { 5916 break 5917 } 5918 _ = x1.Args[3] 5919 if p != x1.Args[0] { 5920 break 5921 } 5922 if idx != x1.Args[1] { 5923 break 5924 } 5925 x1_2 := x1.Args[2] 5926 if x1_2.Op != OpAMD64SHRLconst { 5927 break 5928 } 5929 if x1_2.AuxInt != 16 { 5930 break 5931 } 5932 if w != x1_2.Args[0] { 5933 break 5934 } 5935 x0 := x1.Args[3] 5936 if x0.Op != OpAMD64MOVBstoreidx1 { 5937 break 5938 } 5939 if x0.AuxInt != i-3 { 5940 break 5941 } 5942 if x0.Aux != s { 5943 break 5944 } 5945 _ = x0.Args[3] 5946 if p != x0.Args[0] { 5947 break 5948 } 5949 if idx != x0.Args[1] { 5950 break 5951 } 5952 x0_2 := x0.Args[2] 5953 if x0_2.Op != OpAMD64SHRLconst { 5954 break 5955 } 5956 if x0_2.AuxInt != 24 { 5957 break 5958 } 5959 if w != x0_2.Args[0] { 5960 break 5961 } 5962 mem := x0.Args[3] 5963 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 5964 break 5965 } 5966 v.reset(OpAMD64MOVLstoreidx1) 5967 v.AuxInt = i - 3 5968 v.Aux = s 5969 v.AddArg(p) 5970 v.AddArg(idx) 5971 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 5972 v0.AddArg(w) 5973 v.AddArg(v0) 5974 v.AddArg(mem) 5975 return true 5976 } 5977 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) 5978 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 
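// (The Uses == 1 and clobber conditions, checked in the body below, require
// each intermediate one-byte store to be consumed only by the next store in
// the chain, so all eight stores can be replaced together. Illustratively —
// this is an assumed source-level shape, not text from this file — a
// big-endian write such as
//
//	p[o+0] = byte(w >> 56)
//	// ...
//	p[o+7] = byte(w)
//
// can lower to exactly this chain of indexed one-byte stores, which the rule
// collapses into a single byte-swapped 8-byte store.)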
5979 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem) 5980 for { 5981 i := v.AuxInt 5982 s := v.Aux 5983 _ = v.Args[3] 5984 p := v.Args[0] 5985 idx := v.Args[1] 5986 w := v.Args[2] 5987 x6 := v.Args[3] 5988 if x6.Op != OpAMD64MOVBstoreidx1 { 5989 break 5990 } 5991 if x6.AuxInt != i-1 { 5992 break 5993 } 5994 if x6.Aux != s { 5995 break 5996 } 5997 _ = x6.Args[3] 5998 if p != x6.Args[0] { 5999 break 6000 } 6001 if idx != x6.Args[1] { 6002 break 6003 } 6004 x6_2 := x6.Args[2] 6005 if x6_2.Op != OpAMD64SHRQconst { 6006 break 6007 } 6008 if x6_2.AuxInt != 8 { 6009 break 6010 } 6011 if w != x6_2.Args[0] { 6012 break 6013 } 6014 x5 := x6.Args[3] 6015 if x5.Op != OpAMD64MOVBstoreidx1 { 6016 break 6017 } 6018 if x5.AuxInt != i-2 { 6019 break 6020 } 6021 if x5.Aux != s { 6022 break 6023 } 6024 _ = x5.Args[3] 6025 if p != x5.Args[0] { 6026 break 6027 } 6028 if idx != x5.Args[1] { 6029 break 6030 } 6031 x5_2 := x5.Args[2] 6032 if x5_2.Op != OpAMD64SHRQconst { 6033 break 6034 } 6035 if x5_2.AuxInt != 16 { 6036 break 6037 } 6038 if w != x5_2.Args[0] { 6039 break 6040 } 6041 x4 := x5.Args[3] 6042 if x4.Op != OpAMD64MOVBstoreidx1 { 6043 break 6044 } 6045 if x4.AuxInt != i-3 { 6046 break 6047 } 6048 if x4.Aux != s { 6049 break 6050 } 6051 _ = x4.Args[3] 6052 if p != x4.Args[0] { 6053 break 6054 } 6055 if idx != x4.Args[1] { 6056 break 6057 } 6058 x4_2 := x4.Args[2] 6059 if x4_2.Op != OpAMD64SHRQconst { 6060 break 6061 } 6062 if x4_2.AuxInt != 24 { 6063 break 6064 } 6065 if w != x4_2.Args[0] { 6066 break 6067 } 6068 x3 := x4.Args[3] 6069 if x3.Op != OpAMD64MOVBstoreidx1 { 6070 break 6071 } 6072 if x3.AuxInt != i-4 { 6073 break 6074 } 6075 if x3.Aux != s { 6076 break 6077 } 6078 _ = x3.Args[3] 6079 if p != x3.Args[0] { 6080 break 6081 } 6082 if idx != x3.Args[1] { 6083 break 6084 } 6085 x3_2 := x3.Args[2] 6086 if x3_2.Op != OpAMD64SHRQconst { 6087 break 6088 } 6089 if x3_2.AuxInt != 32 { 6090 break 6091 } 6092 if w != x3_2.Args[0] { 6093 break 6094 } 6095 x2 := x3.Args[3] 6096 if x2.Op != OpAMD64MOVBstoreidx1 { 6097 break 6098 } 6099 if x2.AuxInt != i-5 { 6100 break 6101 } 6102 if x2.Aux != s { 6103 break 6104 } 6105 _ = x2.Args[3] 6106 if p != x2.Args[0] { 6107 break 6108 } 6109 if idx != x2.Args[1] { 6110 break 6111 } 6112 x2_2 := x2.Args[2] 6113 if x2_2.Op != OpAMD64SHRQconst { 6114 break 6115 } 6116 if x2_2.AuxInt != 40 { 6117 break 6118 } 6119 if w != x2_2.Args[0] { 6120 break 6121 } 6122 x1 := x2.Args[3] 6123 if x1.Op != OpAMD64MOVBstoreidx1 { 6124 break 6125 } 6126 if x1.AuxInt != i-6 { 6127 break 6128 } 6129 if x1.Aux != s { 6130 break 6131 } 6132 _ = x1.Args[3] 6133 if p != x1.Args[0] { 6134 break 6135 } 6136 if idx != x1.Args[1] { 6137 break 6138 } 6139 x1_2 := x1.Args[2] 6140 if x1_2.Op != OpAMD64SHRQconst { 6141 break 6142 } 6143 if x1_2.AuxInt != 48 { 6144 break 6145 } 6146 if w != x1_2.Args[0] { 6147 break 6148 } 6149 x0 := x1.Args[3] 6150 if x0.Op != OpAMD64MOVBstoreidx1 { 6151 break 6152 } 6153 if x0.AuxInt != i-7 { 6154 break 6155 } 6156 if x0.Aux != s { 6157 break 6158 } 6159 _ = x0.Args[3] 6160 if p != x0.Args[0] { 6161 break 6162 } 6163 if idx != x0.Args[1] { 6164 break 6165 } 6166 x0_2 := x0.Args[2] 6167 if x0_2.Op != OpAMD64SHRQconst { 6168 break 6169 } 6170 if x0_2.AuxInt != 56 { 6171 break 6172 } 6173 if w != x0_2.Args[0] { 6174 break 6175 } 6176 mem := x0.Args[3] 6177 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && 
clobber(x5) && clobber(x6)) { 6178 break 6179 } 6180 v.reset(OpAMD64MOVQstoreidx1) 6181 v.AuxInt = i - 7 6182 v.Aux = s 6183 v.AddArg(p) 6184 v.AddArg(idx) 6185 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 6186 v0.AddArg(w) 6187 v.AddArg(v0) 6188 v.AddArg(mem) 6189 return true 6190 } 6191 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 6192 // cond: x.Uses == 1 && clobber(x) 6193 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 6194 for { 6195 i := v.AuxInt 6196 s := v.Aux 6197 _ = v.Args[3] 6198 p := v.Args[0] 6199 idx := v.Args[1] 6200 v_2 := v.Args[2] 6201 if v_2.Op != OpAMD64SHRQconst { 6202 break 6203 } 6204 if v_2.AuxInt != 8 { 6205 break 6206 } 6207 w := v_2.Args[0] 6208 x := v.Args[3] 6209 if x.Op != OpAMD64MOVBstoreidx1 { 6210 break 6211 } 6212 if x.AuxInt != i-1 { 6213 break 6214 } 6215 if x.Aux != s { 6216 break 6217 } 6218 _ = x.Args[3] 6219 if p != x.Args[0] { 6220 break 6221 } 6222 if idx != x.Args[1] { 6223 break 6224 } 6225 if w != x.Args[2] { 6226 break 6227 } 6228 mem := x.Args[3] 6229 if !(x.Uses == 1 && clobber(x)) { 6230 break 6231 } 6232 v.reset(OpAMD64MOVWstoreidx1) 6233 v.AuxInt = i - 1 6234 v.Aux = s 6235 v.AddArg(p) 6236 v.AddArg(idx) 6237 v.AddArg(w) 6238 v.AddArg(mem) 6239 return true 6240 } 6241 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 6242 // cond: x.Uses == 1 && clobber(x) 6243 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 6244 for { 6245 i := v.AuxInt 6246 s := v.Aux 6247 _ = v.Args[3] 6248 p := v.Args[0] 6249 idx := v.Args[1] 6250 v_2 := v.Args[2] 6251 if v_2.Op != OpAMD64SHRQconst { 6252 break 6253 } 6254 j := v_2.AuxInt 6255 w := v_2.Args[0] 6256 x := v.Args[3] 6257 if x.Op != OpAMD64MOVBstoreidx1 { 6258 break 6259 } 6260 if x.AuxInt != i-1 { 6261 break 6262 } 6263 if x.Aux != s { 6264 break 6265 } 6266 _ = x.Args[3] 6267 if p != x.Args[0] { 6268 break 6269 } 6270 if idx != x.Args[1] { 6271 break 6272 } 6273 w0 := x.Args[2] 6274 if w0.Op != OpAMD64SHRQconst { 6275 break 6276 } 6277 if w0.AuxInt != j-8 { 6278 break 6279 } 6280 if w != w0.Args[0] { 6281 break 6282 } 6283 mem := x.Args[3] 6284 if !(x.Uses == 1 && clobber(x)) { 6285 break 6286 } 6287 v.reset(OpAMD64MOVWstoreidx1) 6288 v.AuxInt = i - 1 6289 v.Aux = s 6290 v.AddArg(p) 6291 v.AddArg(idx) 6292 v.AddArg(w0) 6293 v.AddArg(mem) 6294 return true 6295 } 6296 return false 6297 } 6298 func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { 6299 b := v.Block 6300 _ = b 6301 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 6302 // cond: x.Uses == 1 && clobber(x) 6303 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 6304 for { 6305 x := v.Args[0] 6306 if x.Op != OpAMD64MOVLload { 6307 break 6308 } 6309 off := x.AuxInt 6310 sym := x.Aux 6311 _ = x.Args[1] 6312 ptr := x.Args[0] 6313 mem := x.Args[1] 6314 if !(x.Uses == 1 && clobber(x)) { 6315 break 6316 } 6317 b = x.Block 6318 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 6319 v.reset(OpCopy) 6320 v.AddArg(v0) 6321 v0.AuxInt = off 6322 v0.Aux = sym 6323 v0.AddArg(ptr) 6324 v0.AddArg(mem) 6325 return true 6326 } 6327 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 6328 // cond: x.Uses == 1 && clobber(x) 6329 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 6330 for { 6331 x := v.Args[0] 6332 if x.Op != OpAMD64MOVQload { 6333 break 6334 } 6335 off := x.AuxInt 6336 sym := x.Aux 6337 _ = x.Args[1] 6338 ptr := x.Args[0] 6339 mem := x.Args[1] 6340 if !(x.Uses == 1 && clobber(x)) { 6341 
break 6342 } 6343 b = x.Block 6344 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 6345 v.reset(OpCopy) 6346 v.AddArg(v0) 6347 v0.AuxInt = off 6348 v0.Aux = sym 6349 v0.AddArg(ptr) 6350 v0.AddArg(mem) 6351 return true 6352 } 6353 // match: (MOVLQSX (ANDLconst [c] x)) 6354 // cond: c & 0x80000000 == 0 6355 // result: (ANDLconst [c & 0x7fffffff] x) 6356 for { 6357 v_0 := v.Args[0] 6358 if v_0.Op != OpAMD64ANDLconst { 6359 break 6360 } 6361 c := v_0.AuxInt 6362 x := v_0.Args[0] 6363 if !(c&0x80000000 == 0) { 6364 break 6365 } 6366 v.reset(OpAMD64ANDLconst) 6367 v.AuxInt = c & 0x7fffffff 6368 v.AddArg(x) 6369 return true 6370 } 6371 // match: (MOVLQSX x:(MOVLQSX _)) 6372 // cond: 6373 // result: x 6374 for { 6375 x := v.Args[0] 6376 if x.Op != OpAMD64MOVLQSX { 6377 break 6378 } 6379 v.reset(OpCopy) 6380 v.Type = x.Type 6381 v.AddArg(x) 6382 return true 6383 } 6384 // match: (MOVLQSX x:(MOVWQSX _)) 6385 // cond: 6386 // result: x 6387 for { 6388 x := v.Args[0] 6389 if x.Op != OpAMD64MOVWQSX { 6390 break 6391 } 6392 v.reset(OpCopy) 6393 v.Type = x.Type 6394 v.AddArg(x) 6395 return true 6396 } 6397 // match: (MOVLQSX x:(MOVBQSX _)) 6398 // cond: 6399 // result: x 6400 for { 6401 x := v.Args[0] 6402 if x.Op != OpAMD64MOVBQSX { 6403 break 6404 } 6405 v.reset(OpCopy) 6406 v.Type = x.Type 6407 v.AddArg(x) 6408 return true 6409 } 6410 return false 6411 } 6412 func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { 6413 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 6414 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6415 // result: (MOVLQSX x) 6416 for { 6417 off := v.AuxInt 6418 sym := v.Aux 6419 _ = v.Args[1] 6420 ptr := v.Args[0] 6421 v_1 := v.Args[1] 6422 if v_1.Op != OpAMD64MOVLstore { 6423 break 6424 } 6425 off2 := v_1.AuxInt 6426 sym2 := v_1.Aux 6427 _ = v_1.Args[2] 6428 ptr2 := v_1.Args[0] 6429 x := v_1.Args[1] 6430 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6431 break 6432 } 6433 v.reset(OpAMD64MOVLQSX) 6434 v.AddArg(x) 6435 return true 6436 } 6437 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6438 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6439 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6440 for { 6441 off1 := v.AuxInt 6442 sym1 := v.Aux 6443 _ = v.Args[1] 6444 v_0 := v.Args[0] 6445 if v_0.Op != OpAMD64LEAQ { 6446 break 6447 } 6448 off2 := v_0.AuxInt 6449 sym2 := v_0.Aux 6450 base := v_0.Args[0] 6451 mem := v.Args[1] 6452 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6453 break 6454 } 6455 v.reset(OpAMD64MOVLQSXload) 6456 v.AuxInt = off1 + off2 6457 v.Aux = mergeSym(sym1, sym2) 6458 v.AddArg(base) 6459 v.AddArg(mem) 6460 return true 6461 } 6462 return false 6463 } 6464 func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { 6465 b := v.Block 6466 _ = b 6467 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 6468 // cond: x.Uses == 1 && clobber(x) 6469 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 6470 for { 6471 x := v.Args[0] 6472 if x.Op != OpAMD64MOVLload { 6473 break 6474 } 6475 off := x.AuxInt 6476 sym := x.Aux 6477 _ = x.Args[1] 6478 ptr := x.Args[0] 6479 mem := x.Args[1] 6480 if !(x.Uses == 1 && clobber(x)) { 6481 break 6482 } 6483 b = x.Block 6484 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 6485 v.reset(OpCopy) 6486 v.AddArg(v0) 6487 v0.AuxInt = off 6488 v0.Aux = sym 6489 v0.AddArg(ptr) 6490 v0.AddArg(mem) 6491 return true 6492 } 6493 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 6494 // cond: x.Uses == 1 && clobber(x) 
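// ("@x.Block" in results like the one below means the replacement value is
// created in the block of the matched load x rather than in v's block; v
// itself is reset to an OpCopy of the new value, as the generated body shows.
// Narrowing the 8-byte MOVQload to a 4-byte MOVLload is safe here because
// only the low 32 bits are consumed and x has no other uses.)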
6495 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 6496 for { 6497 x := v.Args[0] 6498 if x.Op != OpAMD64MOVQload { 6499 break 6500 } 6501 off := x.AuxInt 6502 sym := x.Aux 6503 _ = x.Args[1] 6504 ptr := x.Args[0] 6505 mem := x.Args[1] 6506 if !(x.Uses == 1 && clobber(x)) { 6507 break 6508 } 6509 b = x.Block 6510 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 6511 v.reset(OpCopy) 6512 v.AddArg(v0) 6513 v0.AuxInt = off 6514 v0.Aux = sym 6515 v0.AddArg(ptr) 6516 v0.AddArg(mem) 6517 return true 6518 } 6519 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 6520 // cond: x.Uses == 1 && clobber(x) 6521 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 6522 for { 6523 x := v.Args[0] 6524 if x.Op != OpAMD64MOVLloadidx1 { 6525 break 6526 } 6527 off := x.AuxInt 6528 sym := x.Aux 6529 _ = x.Args[2] 6530 ptr := x.Args[0] 6531 idx := x.Args[1] 6532 mem := x.Args[2] 6533 if !(x.Uses == 1 && clobber(x)) { 6534 break 6535 } 6536 b = x.Block 6537 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 6538 v.reset(OpCopy) 6539 v.AddArg(v0) 6540 v0.AuxInt = off 6541 v0.Aux = sym 6542 v0.AddArg(ptr) 6543 v0.AddArg(idx) 6544 v0.AddArg(mem) 6545 return true 6546 } 6547 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 6548 // cond: x.Uses == 1 && clobber(x) 6549 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 6550 for { 6551 x := v.Args[0] 6552 if x.Op != OpAMD64MOVLloadidx4 { 6553 break 6554 } 6555 off := x.AuxInt 6556 sym := x.Aux 6557 _ = x.Args[2] 6558 ptr := x.Args[0] 6559 idx := x.Args[1] 6560 mem := x.Args[2] 6561 if !(x.Uses == 1 && clobber(x)) { 6562 break 6563 } 6564 b = x.Block 6565 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) 6566 v.reset(OpCopy) 6567 v.AddArg(v0) 6568 v0.AuxInt = off 6569 v0.Aux = sym 6570 v0.AddArg(ptr) 6571 v0.AddArg(idx) 6572 v0.AddArg(mem) 6573 return true 6574 } 6575 // match: (MOVLQZX (ANDLconst [c] x)) 6576 // cond: 6577 // result: (ANDLconst [c] x) 6578 for { 6579 v_0 := v.Args[0] 6580 if v_0.Op != OpAMD64ANDLconst { 6581 break 6582 } 6583 c := v_0.AuxInt 6584 x := v_0.Args[0] 6585 v.reset(OpAMD64ANDLconst) 6586 v.AuxInt = c 6587 v.AddArg(x) 6588 return true 6589 } 6590 // match: (MOVLQZX x:(MOVLQZX _)) 6591 // cond: 6592 // result: x 6593 for { 6594 x := v.Args[0] 6595 if x.Op != OpAMD64MOVLQZX { 6596 break 6597 } 6598 v.reset(OpCopy) 6599 v.Type = x.Type 6600 v.AddArg(x) 6601 return true 6602 } 6603 // match: (MOVLQZX x:(MOVWQZX _)) 6604 // cond: 6605 // result: x 6606 for { 6607 x := v.Args[0] 6608 if x.Op != OpAMD64MOVWQZX { 6609 break 6610 } 6611 v.reset(OpCopy) 6612 v.Type = x.Type 6613 v.AddArg(x) 6614 return true 6615 } 6616 // match: (MOVLQZX x:(MOVBQZX _)) 6617 // cond: 6618 // result: x 6619 for { 6620 x := v.Args[0] 6621 if x.Op != OpAMD64MOVBQZX { 6622 break 6623 } 6624 v.reset(OpCopy) 6625 v.Type = x.Type 6626 v.AddArg(x) 6627 return true 6628 } 6629 return false 6630 } 6631 func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool { 6632 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 6633 // cond: is32Bit(off1+off2) 6634 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 6635 for { 6636 off1 := v.AuxInt 6637 sym := v.Aux 6638 _ = v.Args[1] 6639 v_0 := v.Args[0] 6640 if v_0.Op != OpAMD64ADDQconst { 6641 break 6642 } 6643 off2 := v_0.AuxInt 6644 ptr := v_0.Args[0] 6645 mem := v.Args[1] 6646 if !(is32Bit(off1 + off2)) { 6647 break 6648 } 6649 v.reset(OpAMD64MOVLatomicload) 6650 v.AuxInt = off1 + off2 6651 v.Aux = sym 6652 v.AddArg(ptr) 6653 
v.AddArg(mem) 6654 return true 6655 } 6656 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 6657 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6658 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 6659 for { 6660 off1 := v.AuxInt 6661 sym1 := v.Aux 6662 _ = v.Args[1] 6663 v_0 := v.Args[0] 6664 if v_0.Op != OpAMD64LEAQ { 6665 break 6666 } 6667 off2 := v_0.AuxInt 6668 sym2 := v_0.Aux 6669 ptr := v_0.Args[0] 6670 mem := v.Args[1] 6671 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6672 break 6673 } 6674 v.reset(OpAMD64MOVLatomicload) 6675 v.AuxInt = off1 + off2 6676 v.Aux = mergeSym(sym1, sym2) 6677 v.AddArg(ptr) 6678 v.AddArg(mem) 6679 return true 6680 } 6681 return false 6682 } 6683 func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { 6684 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 6685 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6686 // result: (MOVLQZX x) 6687 for { 6688 off := v.AuxInt 6689 sym := v.Aux 6690 _ = v.Args[1] 6691 ptr := v.Args[0] 6692 v_1 := v.Args[1] 6693 if v_1.Op != OpAMD64MOVLstore { 6694 break 6695 } 6696 off2 := v_1.AuxInt 6697 sym2 := v_1.Aux 6698 _ = v_1.Args[2] 6699 ptr2 := v_1.Args[0] 6700 x := v_1.Args[1] 6701 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6702 break 6703 } 6704 v.reset(OpAMD64MOVLQZX) 6705 v.AddArg(x) 6706 return true 6707 } 6708 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 6709 // cond: is32Bit(off1+off2) 6710 // result: (MOVLload [off1+off2] {sym} ptr mem) 6711 for { 6712 off1 := v.AuxInt 6713 sym := v.Aux 6714 _ = v.Args[1] 6715 v_0 := v.Args[0] 6716 if v_0.Op != OpAMD64ADDQconst { 6717 break 6718 } 6719 off2 := v_0.AuxInt 6720 ptr := v_0.Args[0] 6721 mem := v.Args[1] 6722 if !(is32Bit(off1 + off2)) { 6723 break 6724 } 6725 v.reset(OpAMD64MOVLload) 6726 v.AuxInt = off1 + off2 6727 v.Aux = sym 6728 v.AddArg(ptr) 6729 v.AddArg(mem) 6730 return true 6731 } 6732 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6733 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6734 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6735 for { 6736 off1 := v.AuxInt 6737 sym1 := v.Aux 6738 _ = v.Args[1] 6739 v_0 := v.Args[0] 6740 if v_0.Op != OpAMD64LEAQ { 6741 break 6742 } 6743 off2 := v_0.AuxInt 6744 sym2 := v_0.Aux 6745 base := v_0.Args[0] 6746 mem := v.Args[1] 6747 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6748 break 6749 } 6750 v.reset(OpAMD64MOVLload) 6751 v.AuxInt = off1 + off2 6752 v.Aux = mergeSym(sym1, sym2) 6753 v.AddArg(base) 6754 v.AddArg(mem) 6755 return true 6756 } 6757 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 6758 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6759 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6760 for { 6761 off1 := v.AuxInt 6762 sym1 := v.Aux 6763 _ = v.Args[1] 6764 v_0 := v.Args[0] 6765 if v_0.Op != OpAMD64LEAQ1 { 6766 break 6767 } 6768 off2 := v_0.AuxInt 6769 sym2 := v_0.Aux 6770 _ = v_0.Args[1] 6771 ptr := v_0.Args[0] 6772 idx := v_0.Args[1] 6773 mem := v.Args[1] 6774 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6775 break 6776 } 6777 v.reset(OpAMD64MOVLloadidx1) 6778 v.AuxInt = off1 + off2 6779 v.Aux = mergeSym(sym1, sym2) 6780 v.AddArg(ptr) 6781 v.AddArg(idx) 6782 v.AddArg(mem) 6783 return true 6784 } 6785 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 6786 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6787 // result: (MOVLloadidx4 
[off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6788 for { 6789 off1 := v.AuxInt 6790 sym1 := v.Aux 6791 _ = v.Args[1] 6792 v_0 := v.Args[0] 6793 if v_0.Op != OpAMD64LEAQ4 { 6794 break 6795 } 6796 off2 := v_0.AuxInt 6797 sym2 := v_0.Aux 6798 _ = v_0.Args[1] 6799 ptr := v_0.Args[0] 6800 idx := v_0.Args[1] 6801 mem := v.Args[1] 6802 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6803 break 6804 } 6805 v.reset(OpAMD64MOVLloadidx4) 6806 v.AuxInt = off1 + off2 6807 v.Aux = mergeSym(sym1, sym2) 6808 v.AddArg(ptr) 6809 v.AddArg(idx) 6810 v.AddArg(mem) 6811 return true 6812 } 6813 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 6814 // cond: ptr.Op != OpSB 6815 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 6816 for { 6817 off := v.AuxInt 6818 sym := v.Aux 6819 _ = v.Args[1] 6820 v_0 := v.Args[0] 6821 if v_0.Op != OpAMD64ADDQ { 6822 break 6823 } 6824 _ = v_0.Args[1] 6825 ptr := v_0.Args[0] 6826 idx := v_0.Args[1] 6827 mem := v.Args[1] 6828 if !(ptr.Op != OpSB) { 6829 break 6830 } 6831 v.reset(OpAMD64MOVLloadidx1) 6832 v.AuxInt = off 6833 v.Aux = sym 6834 v.AddArg(ptr) 6835 v.AddArg(idx) 6836 v.AddArg(mem) 6837 return true 6838 } 6839 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 6840 // cond: canMergeSym(sym1, sym2) 6841 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6842 for { 6843 off1 := v.AuxInt 6844 sym1 := v.Aux 6845 _ = v.Args[1] 6846 v_0 := v.Args[0] 6847 if v_0.Op != OpAMD64LEAL { 6848 break 6849 } 6850 off2 := v_0.AuxInt 6851 sym2 := v_0.Aux 6852 base := v_0.Args[0] 6853 mem := v.Args[1] 6854 if !(canMergeSym(sym1, sym2)) { 6855 break 6856 } 6857 v.reset(OpAMD64MOVLload) 6858 v.AuxInt = off1 + off2 6859 v.Aux = mergeSym(sym1, sym2) 6860 v.AddArg(base) 6861 v.AddArg(mem) 6862 return true 6863 } 6864 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 6865 // cond: is32Bit(off1+off2) 6866 // result: (MOVLload [off1+off2] {sym} ptr mem) 6867 for { 6868 off1 := v.AuxInt 6869 sym := v.Aux 6870 _ = v.Args[1] 6871 v_0 := v.Args[0] 6872 if v_0.Op != OpAMD64ADDLconst { 6873 break 6874 } 6875 off2 := v_0.AuxInt 6876 ptr := v_0.Args[0] 6877 mem := v.Args[1] 6878 if !(is32Bit(off1 + off2)) { 6879 break 6880 } 6881 v.reset(OpAMD64MOVLload) 6882 v.AuxInt = off1 + off2 6883 v.Aux = sym 6884 v.AddArg(ptr) 6885 v.AddArg(mem) 6886 return true 6887 } 6888 return false 6889 } 6890 func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { 6891 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 6892 // cond: 6893 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 6894 for { 6895 c := v.AuxInt 6896 sym := v.Aux 6897 _ = v.Args[2] 6898 ptr := v.Args[0] 6899 v_1 := v.Args[1] 6900 if v_1.Op != OpAMD64SHLQconst { 6901 break 6902 } 6903 if v_1.AuxInt != 2 { 6904 break 6905 } 6906 idx := v_1.Args[0] 6907 mem := v.Args[2] 6908 v.reset(OpAMD64MOVLloadidx4) 6909 v.AuxInt = c 6910 v.Aux = sym 6911 v.AddArg(ptr) 6912 v.AddArg(idx) 6913 v.AddArg(mem) 6914 return true 6915 } 6916 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) 6917 // cond: 6918 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 6919 for { 6920 c := v.AuxInt 6921 sym := v.Aux 6922 _ = v.Args[2] 6923 v_0 := v.Args[0] 6924 if v_0.Op != OpAMD64SHLQconst { 6925 break 6926 } 6927 if v_0.AuxInt != 2 { 6928 break 6929 } 6930 idx := v_0.Args[0] 6931 ptr := v.Args[1] 6932 mem := v.Args[2] 6933 v.reset(OpAMD64MOVLloadidx4) 6934 v.AuxInt = c 6935 v.Aux = sym 6936 v.AddArg(ptr) 6937 v.AddArg(idx) 6938 v.AddArg(mem) 6939 return true 6940 } 6941 // match: (MOVLloadidx1 [c] {sym} 
(ADDQconst [d] ptr) idx mem) 6942 // cond: 6943 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 6944 for { 6945 c := v.AuxInt 6946 sym := v.Aux 6947 _ = v.Args[2] 6948 v_0 := v.Args[0] 6949 if v_0.Op != OpAMD64ADDQconst { 6950 break 6951 } 6952 d := v_0.AuxInt 6953 ptr := v_0.Args[0] 6954 idx := v.Args[1] 6955 mem := v.Args[2] 6956 v.reset(OpAMD64MOVLloadidx1) 6957 v.AuxInt = c + d 6958 v.Aux = sym 6959 v.AddArg(ptr) 6960 v.AddArg(idx) 6961 v.AddArg(mem) 6962 return true 6963 } 6964 // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 6965 // cond: 6966 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 6967 for { 6968 c := v.AuxInt 6969 sym := v.Aux 6970 _ = v.Args[2] 6971 idx := v.Args[0] 6972 v_1 := v.Args[1] 6973 if v_1.Op != OpAMD64ADDQconst { 6974 break 6975 } 6976 d := v_1.AuxInt 6977 ptr := v_1.Args[0] 6978 mem := v.Args[2] 6979 v.reset(OpAMD64MOVLloadidx1) 6980 v.AuxInt = c + d 6981 v.Aux = sym 6982 v.AddArg(ptr) 6983 v.AddArg(idx) 6984 v.AddArg(mem) 6985 return true 6986 } 6987 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 6988 // cond: 6989 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 6990 for { 6991 c := v.AuxInt 6992 sym := v.Aux 6993 _ = v.Args[2] 6994 ptr := v.Args[0] 6995 v_1 := v.Args[1] 6996 if v_1.Op != OpAMD64ADDQconst { 6997 break 6998 } 6999 d := v_1.AuxInt 7000 idx := v_1.Args[0] 7001 mem := v.Args[2] 7002 v.reset(OpAMD64MOVLloadidx1) 7003 v.AuxInt = c + d 7004 v.Aux = sym 7005 v.AddArg(ptr) 7006 v.AddArg(idx) 7007 v.AddArg(mem) 7008 return true 7009 } 7010 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 7011 // cond: 7012 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7013 for { 7014 c := v.AuxInt 7015 sym := v.Aux 7016 _ = v.Args[2] 7017 v_0 := v.Args[0] 7018 if v_0.Op != OpAMD64ADDQconst { 7019 break 7020 } 7021 d := v_0.AuxInt 7022 idx := v_0.Args[0] 7023 ptr := v.Args[1] 7024 mem := v.Args[2] 7025 v.reset(OpAMD64MOVLloadidx1) 7026 v.AuxInt = c + d 7027 v.Aux = sym 7028 v.AddArg(ptr) 7029 v.AddArg(idx) 7030 v.AddArg(mem) 7031 return true 7032 } 7033 return false 7034 } 7035 func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { 7036 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 7037 // cond: 7038 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 7039 for { 7040 c := v.AuxInt 7041 sym := v.Aux 7042 _ = v.Args[2] 7043 v_0 := v.Args[0] 7044 if v_0.Op != OpAMD64ADDQconst { 7045 break 7046 } 7047 d := v_0.AuxInt 7048 ptr := v_0.Args[0] 7049 idx := v.Args[1] 7050 mem := v.Args[2] 7051 v.reset(OpAMD64MOVLloadidx4) 7052 v.AuxInt = c + d 7053 v.Aux = sym 7054 v.AddArg(ptr) 7055 v.AddArg(idx) 7056 v.AddArg(mem) 7057 return true 7058 } 7059 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 7060 // cond: 7061 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 7062 for { 7063 c := v.AuxInt 7064 sym := v.Aux 7065 _ = v.Args[2] 7066 ptr := v.Args[0] 7067 v_1 := v.Args[1] 7068 if v_1.Op != OpAMD64ADDQconst { 7069 break 7070 } 7071 d := v_1.AuxInt 7072 idx := v_1.Args[0] 7073 mem := v.Args[2] 7074 v.reset(OpAMD64MOVLloadidx4) 7075 v.AuxInt = c + 4*d 7076 v.Aux = sym 7077 v.AddArg(ptr) 7078 v.AddArg(idx) 7079 v.AddArg(mem) 7080 return true 7081 } 7082 return false 7083 } 7084 func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { 7085 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 7086 // cond: 7087 // result: (MOVLstore [off] {sym} ptr x mem) 7088 for { 7089 off := v.AuxInt 7090 sym := v.Aux 7091 _ = v.Args[2] 7092 ptr := v.Args[0] 7093 v_1 := v.Args[1] 7094 if v_1.Op != 
OpAMD64MOVLQSX { 7095 break 7096 } 7097 x := v_1.Args[0] 7098 mem := v.Args[2] 7099 v.reset(OpAMD64MOVLstore) 7100 v.AuxInt = off 7101 v.Aux = sym 7102 v.AddArg(ptr) 7103 v.AddArg(x) 7104 v.AddArg(mem) 7105 return true 7106 } 7107 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 7108 // cond: 7109 // result: (MOVLstore [off] {sym} ptr x mem) 7110 for { 7111 off := v.AuxInt 7112 sym := v.Aux 7113 _ = v.Args[2] 7114 ptr := v.Args[0] 7115 v_1 := v.Args[1] 7116 if v_1.Op != OpAMD64MOVLQZX { 7117 break 7118 } 7119 x := v_1.Args[0] 7120 mem := v.Args[2] 7121 v.reset(OpAMD64MOVLstore) 7122 v.AuxInt = off 7123 v.Aux = sym 7124 v.AddArg(ptr) 7125 v.AddArg(x) 7126 v.AddArg(mem) 7127 return true 7128 } 7129 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7130 // cond: is32Bit(off1+off2) 7131 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 7132 for { 7133 off1 := v.AuxInt 7134 sym := v.Aux 7135 _ = v.Args[2] 7136 v_0 := v.Args[0] 7137 if v_0.Op != OpAMD64ADDQconst { 7138 break 7139 } 7140 off2 := v_0.AuxInt 7141 ptr := v_0.Args[0] 7142 val := v.Args[1] 7143 mem := v.Args[2] 7144 if !(is32Bit(off1 + off2)) { 7145 break 7146 } 7147 v.reset(OpAMD64MOVLstore) 7148 v.AuxInt = off1 + off2 7149 v.Aux = sym 7150 v.AddArg(ptr) 7151 v.AddArg(val) 7152 v.AddArg(mem) 7153 return true 7154 } 7155 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 7156 // cond: validOff(off) 7157 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 7158 for { 7159 off := v.AuxInt 7160 sym := v.Aux 7161 _ = v.Args[2] 7162 ptr := v.Args[0] 7163 v_1 := v.Args[1] 7164 if v_1.Op != OpAMD64MOVLconst { 7165 break 7166 } 7167 c := v_1.AuxInt 7168 mem := v.Args[2] 7169 if !(validOff(off)) { 7170 break 7171 } 7172 v.reset(OpAMD64MOVLstoreconst) 7173 v.AuxInt = makeValAndOff(int64(int32(c)), off) 7174 v.Aux = sym 7175 v.AddArg(ptr) 7176 v.AddArg(mem) 7177 return true 7178 } 7179 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7180 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7181 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7182 for { 7183 off1 := v.AuxInt 7184 sym1 := v.Aux 7185 _ = v.Args[2] 7186 v_0 := v.Args[0] 7187 if v_0.Op != OpAMD64LEAQ { 7188 break 7189 } 7190 off2 := v_0.AuxInt 7191 sym2 := v_0.Aux 7192 base := v_0.Args[0] 7193 val := v.Args[1] 7194 mem := v.Args[2] 7195 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7196 break 7197 } 7198 v.reset(OpAMD64MOVLstore) 7199 v.AuxInt = off1 + off2 7200 v.Aux = mergeSym(sym1, sym2) 7201 v.AddArg(base) 7202 v.AddArg(val) 7203 v.AddArg(mem) 7204 return true 7205 } 7206 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7207 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7208 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7209 for { 7210 off1 := v.AuxInt 7211 sym1 := v.Aux 7212 _ = v.Args[2] 7213 v_0 := v.Args[0] 7214 if v_0.Op != OpAMD64LEAQ1 { 7215 break 7216 } 7217 off2 := v_0.AuxInt 7218 sym2 := v_0.Aux 7219 _ = v_0.Args[1] 7220 ptr := v_0.Args[0] 7221 idx := v_0.Args[1] 7222 val := v.Args[1] 7223 mem := v.Args[2] 7224 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7225 break 7226 } 7227 v.reset(OpAMD64MOVLstoreidx1) 7228 v.AuxInt = off1 + off2 7229 v.Aux = mergeSym(sym1, sym2) 7230 v.AddArg(ptr) 7231 v.AddArg(idx) 7232 v.AddArg(val) 7233 v.AddArg(mem) 7234 return true 7235 } 7236 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 7237 // cond: is32Bit(off1+off2) && 
canMergeSym(sym1, sym2) 7238 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7239 for { 7240 off1 := v.AuxInt 7241 sym1 := v.Aux 7242 _ = v.Args[2] 7243 v_0 := v.Args[0] 7244 if v_0.Op != OpAMD64LEAQ4 { 7245 break 7246 } 7247 off2 := v_0.AuxInt 7248 sym2 := v_0.Aux 7249 _ = v_0.Args[1] 7250 ptr := v_0.Args[0] 7251 idx := v_0.Args[1] 7252 val := v.Args[1] 7253 mem := v.Args[2] 7254 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7255 break 7256 } 7257 v.reset(OpAMD64MOVLstoreidx4) 7258 v.AuxInt = off1 + off2 7259 v.Aux = mergeSym(sym1, sym2) 7260 v.AddArg(ptr) 7261 v.AddArg(idx) 7262 v.AddArg(val) 7263 v.AddArg(mem) 7264 return true 7265 } 7266 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 7267 // cond: ptr.Op != OpSB 7268 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 7269 for { 7270 off := v.AuxInt 7271 sym := v.Aux 7272 _ = v.Args[2] 7273 v_0 := v.Args[0] 7274 if v_0.Op != OpAMD64ADDQ { 7275 break 7276 } 7277 _ = v_0.Args[1] 7278 ptr := v_0.Args[0] 7279 idx := v_0.Args[1] 7280 val := v.Args[1] 7281 mem := v.Args[2] 7282 if !(ptr.Op != OpSB) { 7283 break 7284 } 7285 v.reset(OpAMD64MOVLstoreidx1) 7286 v.AuxInt = off 7287 v.Aux = sym 7288 v.AddArg(ptr) 7289 v.AddArg(idx) 7290 v.AddArg(val) 7291 v.AddArg(mem) 7292 return true 7293 } 7294 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 7295 // cond: x.Uses == 1 && clobber(x) 7296 // result: (MOVQstore [i-4] {s} p w mem) 7297 for { 7298 i := v.AuxInt 7299 s := v.Aux 7300 _ = v.Args[2] 7301 p := v.Args[0] 7302 v_1 := v.Args[1] 7303 if v_1.Op != OpAMD64SHRQconst { 7304 break 7305 } 7306 if v_1.AuxInt != 32 { 7307 break 7308 } 7309 w := v_1.Args[0] 7310 x := v.Args[2] 7311 if x.Op != OpAMD64MOVLstore { 7312 break 7313 } 7314 if x.AuxInt != i-4 { 7315 break 7316 } 7317 if x.Aux != s { 7318 break 7319 } 7320 _ = x.Args[2] 7321 if p != x.Args[0] { 7322 break 7323 } 7324 if w != x.Args[1] { 7325 break 7326 } 7327 mem := x.Args[2] 7328 if !(x.Uses == 1 && clobber(x)) { 7329 break 7330 } 7331 v.reset(OpAMD64MOVQstore) 7332 v.AuxInt = i - 4 7333 v.Aux = s 7334 v.AddArg(p) 7335 v.AddArg(w) 7336 v.AddArg(mem) 7337 return true 7338 } 7339 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 7340 // cond: x.Uses == 1 && clobber(x) 7341 // result: (MOVQstore [i-4] {s} p w0 mem) 7342 for { 7343 i := v.AuxInt 7344 s := v.Aux 7345 _ = v.Args[2] 7346 p := v.Args[0] 7347 v_1 := v.Args[1] 7348 if v_1.Op != OpAMD64SHRQconst { 7349 break 7350 } 7351 j := v_1.AuxInt 7352 w := v_1.Args[0] 7353 x := v.Args[2] 7354 if x.Op != OpAMD64MOVLstore { 7355 break 7356 } 7357 if x.AuxInt != i-4 { 7358 break 7359 } 7360 if x.Aux != s { 7361 break 7362 } 7363 _ = x.Args[2] 7364 if p != x.Args[0] { 7365 break 7366 } 7367 w0 := x.Args[1] 7368 if w0.Op != OpAMD64SHRQconst { 7369 break 7370 } 7371 if w0.AuxInt != j-32 { 7372 break 7373 } 7374 if w != w0.Args[0] { 7375 break 7376 } 7377 mem := x.Args[2] 7378 if !(x.Uses == 1 && clobber(x)) { 7379 break 7380 } 7381 v.reset(OpAMD64MOVQstore) 7382 v.AuxInt = i - 4 7383 v.Aux = s 7384 v.AddArg(p) 7385 v.AddArg(w0) 7386 v.AddArg(mem) 7387 return true 7388 } 7389 return false 7390 } 7391 func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { 7392 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 7393 // cond: canMergeSym(sym1, sym2) 7394 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7395 for { 7396 off1 := v.AuxInt 7397 sym1 := v.Aux 7398 _ = v.Args[2] 
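// (The generated "_ = v.Args[2]" above is a bounds hint, presumably emitted
// by gen/rulegen.go: reading the highest-index argument once lets the
// compiler prove the subsequent v.Args[0], v.Args[1], and v.Args[2] accesses
// in bounds and drop their individual checks.)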
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(4 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx4 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 2
		v0.AddArg(i)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool {
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool {
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool {
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
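// Editorial note (added for exposition; not generator output): the MOVQload
// matcher below opens with a store-to-load forwarding rule. A MOVQload whose
// memory argument is a MOVQstore to the same pointer, symbol, and offset is
// rewritten to an OpCopy of the value that was just stored, so the load never
// touches memory. The remaining rules fold ADDQconst/LEAQ/LEAL address
// arithmetic into the load's AuxInt offset and Aux symbol, mirroring the
// store rules above.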
func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
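// Editorial note (added for exposition; not generator output): the storeconst
// rules below keep two quantities in one AuxInt via the ValAndOff encoding
// defined elsewhere in this package: the constant to be stored lives in the
// high 32 bits and the address offset in the low 32 bits, so for example
// makeValAndOff(5, 16) yields a ValAndOff with Val() == 5 and Off() == 16.
// canAdd reports whether the offset half can absorb a further displacement
// without overflowing 32 bits, and add performs that addition while leaving
// the value half untouched.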
func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)
	// result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c2 := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = ValAndOff(c2).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
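// Editorial note (added for exposition; not generator output): the indexed
// store rules below follow the scaling convention used throughout this file.
// An *idx1 op whose index is provably shifted left by 3 (SHLQconst [3]) is
// converted to the *idx8 form, which maps onto a scaled x86 addressing mode;
// and once an index is scaled, a constant d folded out of the index moves
// into the displacement multiplied by the scale, hence c+8*d in the idx8
// rules (and c+4*d in the idx4 rules elsewhere).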
func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
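// Editorial note (added for exposition; not generator output): several of the
// load/store rules above and below convert (ADDQ ptr idx) addressing to an
// idx1 form only under ptr.Op != OpSB. SB is the pseudo register holding the
// static base for global symbol addresses; as far as these rules show, it
// cannot serve as the base of an indexed memory operand, so pointers derived
// directly from SB are left to the non-indexed rewrites.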
9564 for { 9565 off1 := v.AuxInt 9566 sym1 := v.Aux 9567 _ = v.Args[1] 9568 v_0 := v.Args[0] 9569 if v_0.Op != OpAMD64LEAQ8 { 9570 break 9571 } 9572 off2 := v_0.AuxInt 9573 sym2 := v_0.Aux 9574 _ = v_0.Args[1] 9575 ptr := v_0.Args[0] 9576 idx := v_0.Args[1] 9577 mem := v.Args[1] 9578 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9579 break 9580 } 9581 v.reset(OpAMD64MOVSDloadidx8) 9582 v.AuxInt = off1 + off2 9583 v.Aux = mergeSym(sym1, sym2) 9584 v.AddArg(ptr) 9585 v.AddArg(idx) 9586 v.AddArg(mem) 9587 return true 9588 } 9589 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 9590 // cond: ptr.Op != OpSB 9591 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 9592 for { 9593 off := v.AuxInt 9594 sym := v.Aux 9595 _ = v.Args[1] 9596 v_0 := v.Args[0] 9597 if v_0.Op != OpAMD64ADDQ { 9598 break 9599 } 9600 _ = v_0.Args[1] 9601 ptr := v_0.Args[0] 9602 idx := v_0.Args[1] 9603 mem := v.Args[1] 9604 if !(ptr.Op != OpSB) { 9605 break 9606 } 9607 v.reset(OpAMD64MOVSDloadidx1) 9608 v.AuxInt = off 9609 v.Aux = sym 9610 v.AddArg(ptr) 9611 v.AddArg(idx) 9612 v.AddArg(mem) 9613 return true 9614 } 9615 return false 9616 } 9617 func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { 9618 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 9619 // cond: 9620 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 9621 for { 9622 c := v.AuxInt 9623 sym := v.Aux 9624 _ = v.Args[2] 9625 ptr := v.Args[0] 9626 v_1 := v.Args[1] 9627 if v_1.Op != OpAMD64SHLQconst { 9628 break 9629 } 9630 if v_1.AuxInt != 3 { 9631 break 9632 } 9633 idx := v_1.Args[0] 9634 mem := v.Args[2] 9635 v.reset(OpAMD64MOVSDloadidx8) 9636 v.AuxInt = c 9637 v.Aux = sym 9638 v.AddArg(ptr) 9639 v.AddArg(idx) 9640 v.AddArg(mem) 9641 return true 9642 } 9643 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9644 // cond: 9645 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 9646 for { 9647 c := v.AuxInt 9648 sym := v.Aux 9649 _ = v.Args[2] 9650 v_0 := v.Args[0] 9651 if v_0.Op != OpAMD64ADDQconst { 9652 break 9653 } 9654 d := v_0.AuxInt 9655 ptr := v_0.Args[0] 9656 idx := v.Args[1] 9657 mem := v.Args[2] 9658 v.reset(OpAMD64MOVSDloadidx1) 9659 v.AuxInt = c + d 9660 v.Aux = sym 9661 v.AddArg(ptr) 9662 v.AddArg(idx) 9663 v.AddArg(mem) 9664 return true 9665 } 9666 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9667 // cond: 9668 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 9669 for { 9670 c := v.AuxInt 9671 sym := v.Aux 9672 _ = v.Args[2] 9673 ptr := v.Args[0] 9674 v_1 := v.Args[1] 9675 if v_1.Op != OpAMD64ADDQconst { 9676 break 9677 } 9678 d := v_1.AuxInt 9679 idx := v_1.Args[0] 9680 mem := v.Args[2] 9681 v.reset(OpAMD64MOVSDloadidx1) 9682 v.AuxInt = c + d 9683 v.Aux = sym 9684 v.AddArg(ptr) 9685 v.AddArg(idx) 9686 v.AddArg(mem) 9687 return true 9688 } 9689 return false 9690 } 9691 func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { 9692 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 9693 // cond: 9694 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 9695 for { 9696 c := v.AuxInt 9697 sym := v.Aux 9698 _ = v.Args[2] 9699 v_0 := v.Args[0] 9700 if v_0.Op != OpAMD64ADDQconst { 9701 break 9702 } 9703 d := v_0.AuxInt 9704 ptr := v_0.Args[0] 9705 idx := v.Args[1] 9706 mem := v.Args[2] 9707 v.reset(OpAMD64MOVSDloadidx8) 9708 v.AuxInt = c + d 9709 v.Aux = sym 9710 v.AddArg(ptr) 9711 v.AddArg(idx) 9712 v.AddArg(mem) 9713 return true 9714 } 9715 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 9716 // cond: 9717 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr 
idx mem) 9718 for { 9719 c := v.AuxInt 9720 sym := v.Aux 9721 _ = v.Args[2] 9722 ptr := v.Args[0] 9723 v_1 := v.Args[1] 9724 if v_1.Op != OpAMD64ADDQconst { 9725 break 9726 } 9727 d := v_1.AuxInt 9728 idx := v_1.Args[0] 9729 mem := v.Args[2] 9730 v.reset(OpAMD64MOVSDloadidx8) 9731 v.AuxInt = c + 8*d 9732 v.Aux = sym 9733 v.AddArg(ptr) 9734 v.AddArg(idx) 9735 v.AddArg(mem) 9736 return true 9737 } 9738 return false 9739 } 9740 func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { 9741 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9742 // cond: is32Bit(off1+off2) 9743 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 9744 for { 9745 off1 := v.AuxInt 9746 sym := v.Aux 9747 _ = v.Args[2] 9748 v_0 := v.Args[0] 9749 if v_0.Op != OpAMD64ADDQconst { 9750 break 9751 } 9752 off2 := v_0.AuxInt 9753 ptr := v_0.Args[0] 9754 val := v.Args[1] 9755 mem := v.Args[2] 9756 if !(is32Bit(off1 + off2)) { 9757 break 9758 } 9759 v.reset(OpAMD64MOVSDstore) 9760 v.AuxInt = off1 + off2 9761 v.Aux = sym 9762 v.AddArg(ptr) 9763 v.AddArg(val) 9764 v.AddArg(mem) 9765 return true 9766 } 9767 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9768 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9769 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9770 for { 9771 off1 := v.AuxInt 9772 sym1 := v.Aux 9773 _ = v.Args[2] 9774 v_0 := v.Args[0] 9775 if v_0.Op != OpAMD64LEAQ { 9776 break 9777 } 9778 off2 := v_0.AuxInt 9779 sym2 := v_0.Aux 9780 base := v_0.Args[0] 9781 val := v.Args[1] 9782 mem := v.Args[2] 9783 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9784 break 9785 } 9786 v.reset(OpAMD64MOVSDstore) 9787 v.AuxInt = off1 + off2 9788 v.Aux = mergeSym(sym1, sym2) 9789 v.AddArg(base) 9790 v.AddArg(val) 9791 v.AddArg(mem) 9792 return true 9793 } 9794 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9795 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9796 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9797 for { 9798 off1 := v.AuxInt 9799 sym1 := v.Aux 9800 _ = v.Args[2] 9801 v_0 := v.Args[0] 9802 if v_0.Op != OpAMD64LEAQ1 { 9803 break 9804 } 9805 off2 := v_0.AuxInt 9806 sym2 := v_0.Aux 9807 _ = v_0.Args[1] 9808 ptr := v_0.Args[0] 9809 idx := v_0.Args[1] 9810 val := v.Args[1] 9811 mem := v.Args[2] 9812 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9813 break 9814 } 9815 v.reset(OpAMD64MOVSDstoreidx1) 9816 v.AuxInt = off1 + off2 9817 v.Aux = mergeSym(sym1, sym2) 9818 v.AddArg(ptr) 9819 v.AddArg(idx) 9820 v.AddArg(val) 9821 v.AddArg(mem) 9822 return true 9823 } 9824 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 9825 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9826 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9827 for { 9828 off1 := v.AuxInt 9829 sym1 := v.Aux 9830 _ = v.Args[2] 9831 v_0 := v.Args[0] 9832 if v_0.Op != OpAMD64LEAQ8 { 9833 break 9834 } 9835 off2 := v_0.AuxInt 9836 sym2 := v_0.Aux 9837 _ = v_0.Args[1] 9838 ptr := v_0.Args[0] 9839 idx := v_0.Args[1] 9840 val := v.Args[1] 9841 mem := v.Args[2] 9842 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9843 break 9844 } 9845 v.reset(OpAMD64MOVSDstoreidx8) 9846 v.AuxInt = off1 + off2 9847 v.Aux = mergeSym(sym1, sym2) 9848 v.AddArg(ptr) 9849 v.AddArg(idx) 9850 v.AddArg(val) 9851 v.AddArg(mem) 9852 return true 9853 } 9854 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 9855 // cond: ptr.Op != OpSB 9856 // result: (MOVSDstoreidx1 [off] 
{sym} ptr idx val mem) 9857 for { 9858 off := v.AuxInt 9859 sym := v.Aux 9860 _ = v.Args[2] 9861 v_0 := v.Args[0] 9862 if v_0.Op != OpAMD64ADDQ { 9863 break 9864 } 9865 _ = v_0.Args[1] 9866 ptr := v_0.Args[0] 9867 idx := v_0.Args[1] 9868 val := v.Args[1] 9869 mem := v.Args[2] 9870 if !(ptr.Op != OpSB) { 9871 break 9872 } 9873 v.reset(OpAMD64MOVSDstoreidx1) 9874 v.AuxInt = off 9875 v.Aux = sym 9876 v.AddArg(ptr) 9877 v.AddArg(idx) 9878 v.AddArg(val) 9879 v.AddArg(mem) 9880 return true 9881 } 9882 return false 9883 } 9884 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { 9885 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 9886 // cond: 9887 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 9888 for { 9889 c := v.AuxInt 9890 sym := v.Aux 9891 _ = v.Args[3] 9892 ptr := v.Args[0] 9893 v_1 := v.Args[1] 9894 if v_1.Op != OpAMD64SHLQconst { 9895 break 9896 } 9897 if v_1.AuxInt != 3 { 9898 break 9899 } 9900 idx := v_1.Args[0] 9901 val := v.Args[2] 9902 mem := v.Args[3] 9903 v.reset(OpAMD64MOVSDstoreidx8) 9904 v.AuxInt = c 9905 v.Aux = sym 9906 v.AddArg(ptr) 9907 v.AddArg(idx) 9908 v.AddArg(val) 9909 v.AddArg(mem) 9910 return true 9911 } 9912 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9913 // cond: 9914 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 9915 for { 9916 c := v.AuxInt 9917 sym := v.Aux 9918 _ = v.Args[3] 9919 v_0 := v.Args[0] 9920 if v_0.Op != OpAMD64ADDQconst { 9921 break 9922 } 9923 d := v_0.AuxInt 9924 ptr := v_0.Args[0] 9925 idx := v.Args[1] 9926 val := v.Args[2] 9927 mem := v.Args[3] 9928 v.reset(OpAMD64MOVSDstoreidx1) 9929 v.AuxInt = c + d 9930 v.Aux = sym 9931 v.AddArg(ptr) 9932 v.AddArg(idx) 9933 v.AddArg(val) 9934 v.AddArg(mem) 9935 return true 9936 } 9937 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9938 // cond: 9939 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 9940 for { 9941 c := v.AuxInt 9942 sym := v.Aux 9943 _ = v.Args[3] 9944 ptr := v.Args[0] 9945 v_1 := v.Args[1] 9946 if v_1.Op != OpAMD64ADDQconst { 9947 break 9948 } 9949 d := v_1.AuxInt 9950 idx := v_1.Args[0] 9951 val := v.Args[2] 9952 mem := v.Args[3] 9953 v.reset(OpAMD64MOVSDstoreidx1) 9954 v.AuxInt = c + d 9955 v.Aux = sym 9956 v.AddArg(ptr) 9957 v.AddArg(idx) 9958 v.AddArg(val) 9959 v.AddArg(mem) 9960 return true 9961 } 9962 return false 9963 } 9964 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { 9965 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9966 // cond: 9967 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 9968 for { 9969 c := v.AuxInt 9970 sym := v.Aux 9971 _ = v.Args[3] 9972 v_0 := v.Args[0] 9973 if v_0.Op != OpAMD64ADDQconst { 9974 break 9975 } 9976 d := v_0.AuxInt 9977 ptr := v_0.Args[0] 9978 idx := v.Args[1] 9979 val := v.Args[2] 9980 mem := v.Args[3] 9981 v.reset(OpAMD64MOVSDstoreidx8) 9982 v.AuxInt = c + d 9983 v.Aux = sym 9984 v.AddArg(ptr) 9985 v.AddArg(idx) 9986 v.AddArg(val) 9987 v.AddArg(mem) 9988 return true 9989 } 9990 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9991 // cond: 9992 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 9993 for { 9994 c := v.AuxInt 9995 sym := v.Aux 9996 _ = v.Args[3] 9997 ptr := v.Args[0] 9998 v_1 := v.Args[1] 9999 if v_1.Op != OpAMD64ADDQconst { 10000 break 10001 } 10002 d := v_1.AuxInt 10003 idx := v_1.Args[0] 10004 val := v.Args[2] 10005 mem := v.Args[3] 10006 v.reset(OpAMD64MOVSDstoreidx8) 10007 v.AuxInt = c + 8*d 10008 v.Aux = sym 10009 v.AddArg(ptr) 10010 v.AddArg(idx) 10011 
v.AddArg(val) 10012 v.AddArg(mem) 10013 return true 10014 } 10015 return false 10016 } 10017 func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { 10018 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 10019 // cond: is32Bit(off1+off2) 10020 // result: (MOVSSload [off1+off2] {sym} ptr mem) 10021 for { 10022 off1 := v.AuxInt 10023 sym := v.Aux 10024 _ = v.Args[1] 10025 v_0 := v.Args[0] 10026 if v_0.Op != OpAMD64ADDQconst { 10027 break 10028 } 10029 off2 := v_0.AuxInt 10030 ptr := v_0.Args[0] 10031 mem := v.Args[1] 10032 if !(is32Bit(off1 + off2)) { 10033 break 10034 } 10035 v.reset(OpAMD64MOVSSload) 10036 v.AuxInt = off1 + off2 10037 v.Aux = sym 10038 v.AddArg(ptr) 10039 v.AddArg(mem) 10040 return true 10041 } 10042 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10043 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10044 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10045 for { 10046 off1 := v.AuxInt 10047 sym1 := v.Aux 10048 _ = v.Args[1] 10049 v_0 := v.Args[0] 10050 if v_0.Op != OpAMD64LEAQ { 10051 break 10052 } 10053 off2 := v_0.AuxInt 10054 sym2 := v_0.Aux 10055 base := v_0.Args[0] 10056 mem := v.Args[1] 10057 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10058 break 10059 } 10060 v.reset(OpAMD64MOVSSload) 10061 v.AuxInt = off1 + off2 10062 v.Aux = mergeSym(sym1, sym2) 10063 v.AddArg(base) 10064 v.AddArg(mem) 10065 return true 10066 } 10067 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 10068 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10069 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10070 for { 10071 off1 := v.AuxInt 10072 sym1 := v.Aux 10073 _ = v.Args[1] 10074 v_0 := v.Args[0] 10075 if v_0.Op != OpAMD64LEAQ1 { 10076 break 10077 } 10078 off2 := v_0.AuxInt 10079 sym2 := v_0.Aux 10080 _ = v_0.Args[1] 10081 ptr := v_0.Args[0] 10082 idx := v_0.Args[1] 10083 mem := v.Args[1] 10084 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10085 break 10086 } 10087 v.reset(OpAMD64MOVSSloadidx1) 10088 v.AuxInt = off1 + off2 10089 v.Aux = mergeSym(sym1, sym2) 10090 v.AddArg(ptr) 10091 v.AddArg(idx) 10092 v.AddArg(mem) 10093 return true 10094 } 10095 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 10096 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10097 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10098 for { 10099 off1 := v.AuxInt 10100 sym1 := v.Aux 10101 _ = v.Args[1] 10102 v_0 := v.Args[0] 10103 if v_0.Op != OpAMD64LEAQ4 { 10104 break 10105 } 10106 off2 := v_0.AuxInt 10107 sym2 := v_0.Aux 10108 _ = v_0.Args[1] 10109 ptr := v_0.Args[0] 10110 idx := v_0.Args[1] 10111 mem := v.Args[1] 10112 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10113 break 10114 } 10115 v.reset(OpAMD64MOVSSloadidx4) 10116 v.AuxInt = off1 + off2 10117 v.Aux = mergeSym(sym1, sym2) 10118 v.AddArg(ptr) 10119 v.AddArg(idx) 10120 v.AddArg(mem) 10121 return true 10122 } 10123 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 10124 // cond: ptr.Op != OpSB 10125 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 10126 for { 10127 off := v.AuxInt 10128 sym := v.Aux 10129 _ = v.Args[1] 10130 v_0 := v.Args[0] 10131 if v_0.Op != OpAMD64ADDQ { 10132 break 10133 } 10134 _ = v_0.Args[1] 10135 ptr := v_0.Args[0] 10136 idx := v_0.Args[1] 10137 mem := v.Args[1] 10138 if !(ptr.Op != OpSB) { 10139 break 10140 } 10141 v.reset(OpAMD64MOVSSloadidx1) 10142 v.AuxInt = off 10143 v.Aux = sym 10144 v.AddArg(ptr) 10145 v.AddArg(idx) 10146 
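// (Presumably the ptr.Op != OpSB guard exists because the pseudo-register
// SB is only encodable as the base of an AMD64 addressing mode; the idx1
// rules treat ptr and idx as commutable, which could otherwise move SB
// into the index slot.)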
v.AddArg(mem) 10147 return true 10148 } 10149 return false 10150 } 10151 func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { 10152 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 10153 // cond: 10154 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 10155 for { 10156 c := v.AuxInt 10157 sym := v.Aux 10158 _ = v.Args[2] 10159 ptr := v.Args[0] 10160 v_1 := v.Args[1] 10161 if v_1.Op != OpAMD64SHLQconst { 10162 break 10163 } 10164 if v_1.AuxInt != 2 { 10165 break 10166 } 10167 idx := v_1.Args[0] 10168 mem := v.Args[2] 10169 v.reset(OpAMD64MOVSSloadidx4) 10170 v.AuxInt = c 10171 v.Aux = sym 10172 v.AddArg(ptr) 10173 v.AddArg(idx) 10174 v.AddArg(mem) 10175 return true 10176 } 10177 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 10178 // cond: 10179 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 10180 for { 10181 c := v.AuxInt 10182 sym := v.Aux 10183 _ = v.Args[2] 10184 v_0 := v.Args[0] 10185 if v_0.Op != OpAMD64ADDQconst { 10186 break 10187 } 10188 d := v_0.AuxInt 10189 ptr := v_0.Args[0] 10190 idx := v.Args[1] 10191 mem := v.Args[2] 10192 v.reset(OpAMD64MOVSSloadidx1) 10193 v.AuxInt = c + d 10194 v.Aux = sym 10195 v.AddArg(ptr) 10196 v.AddArg(idx) 10197 v.AddArg(mem) 10198 return true 10199 } 10200 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 10201 // cond: 10202 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 10203 for { 10204 c := v.AuxInt 10205 sym := v.Aux 10206 _ = v.Args[2] 10207 ptr := v.Args[0] 10208 v_1 := v.Args[1] 10209 if v_1.Op != OpAMD64ADDQconst { 10210 break 10211 } 10212 d := v_1.AuxInt 10213 idx := v_1.Args[0] 10214 mem := v.Args[2] 10215 v.reset(OpAMD64MOVSSloadidx1) 10216 v.AuxInt = c + d 10217 v.Aux = sym 10218 v.AddArg(ptr) 10219 v.AddArg(idx) 10220 v.AddArg(mem) 10221 return true 10222 } 10223 return false 10224 } 10225 func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { 10226 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 10227 // cond: 10228 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 10229 for { 10230 c := v.AuxInt 10231 sym := v.Aux 10232 _ = v.Args[2] 10233 v_0 := v.Args[0] 10234 if v_0.Op != OpAMD64ADDQconst { 10235 break 10236 } 10237 d := v_0.AuxInt 10238 ptr := v_0.Args[0] 10239 idx := v.Args[1] 10240 mem := v.Args[2] 10241 v.reset(OpAMD64MOVSSloadidx4) 10242 v.AuxInt = c + d 10243 v.Aux = sym 10244 v.AddArg(ptr) 10245 v.AddArg(idx) 10246 v.AddArg(mem) 10247 return true 10248 } 10249 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 10250 // cond: 10251 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 10252 for { 10253 c := v.AuxInt 10254 sym := v.Aux 10255 _ = v.Args[2] 10256 ptr := v.Args[0] 10257 v_1 := v.Args[1] 10258 if v_1.Op != OpAMD64ADDQconst { 10259 break 10260 } 10261 d := v_1.AuxInt 10262 idx := v_1.Args[0] 10263 mem := v.Args[2] 10264 v.reset(OpAMD64MOVSSloadidx4) 10265 v.AuxInt = c + 4*d 10266 v.Aux = sym 10267 v.AddArg(ptr) 10268 v.AddArg(idx) 10269 v.AddArg(mem) 10270 return true 10271 } 10272 return false 10273 } 10274 func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { 10275 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 10276 // cond: is32Bit(off1+off2) 10277 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 10278 for { 10279 off1 := v.AuxInt 10280 sym := v.Aux 10281 _ = v.Args[2] 10282 v_0 := v.Args[0] 10283 if v_0.Op != OpAMD64ADDQconst { 10284 break 10285 } 10286 off2 := v_0.AuxInt 10287 ptr := v_0.Args[0] 10288 val := v.Args[1] 10289 mem := v.Args[2] 10290 if !(is32Bit(off1 + off2)) { 
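// A folded displacement must fit in the signed 32-bit immediate of an
// AMD64 addressing mode; when it does not, the rewrite is abandoned.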
10291 break 10292 } 10293 v.reset(OpAMD64MOVSSstore) 10294 v.AuxInt = off1 + off2 10295 v.Aux = sym 10296 v.AddArg(ptr) 10297 v.AddArg(val) 10298 v.AddArg(mem) 10299 return true 10300 } 10301 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 10302 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10303 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10304 for { 10305 off1 := v.AuxInt 10306 sym1 := v.Aux 10307 _ = v.Args[2] 10308 v_0 := v.Args[0] 10309 if v_0.Op != OpAMD64LEAQ { 10310 break 10311 } 10312 off2 := v_0.AuxInt 10313 sym2 := v_0.Aux 10314 base := v_0.Args[0] 10315 val := v.Args[1] 10316 mem := v.Args[2] 10317 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10318 break 10319 } 10320 v.reset(OpAMD64MOVSSstore) 10321 v.AuxInt = off1 + off2 10322 v.Aux = mergeSym(sym1, sym2) 10323 v.AddArg(base) 10324 v.AddArg(val) 10325 v.AddArg(mem) 10326 return true 10327 } 10328 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 10329 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10330 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10331 for { 10332 off1 := v.AuxInt 10333 sym1 := v.Aux 10334 _ = v.Args[2] 10335 v_0 := v.Args[0] 10336 if v_0.Op != OpAMD64LEAQ1 { 10337 break 10338 } 10339 off2 := v_0.AuxInt 10340 sym2 := v_0.Aux 10341 _ = v_0.Args[1] 10342 ptr := v_0.Args[0] 10343 idx := v_0.Args[1] 10344 val := v.Args[1] 10345 mem := v.Args[2] 10346 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10347 break 10348 } 10349 v.reset(OpAMD64MOVSSstoreidx1) 10350 v.AuxInt = off1 + off2 10351 v.Aux = mergeSym(sym1, sym2) 10352 v.AddArg(ptr) 10353 v.AddArg(idx) 10354 v.AddArg(val) 10355 v.AddArg(mem) 10356 return true 10357 } 10358 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 10359 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10360 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10361 for { 10362 off1 := v.AuxInt 10363 sym1 := v.Aux 10364 _ = v.Args[2] 10365 v_0 := v.Args[0] 10366 if v_0.Op != OpAMD64LEAQ4 { 10367 break 10368 } 10369 off2 := v_0.AuxInt 10370 sym2 := v_0.Aux 10371 _ = v_0.Args[1] 10372 ptr := v_0.Args[0] 10373 idx := v_0.Args[1] 10374 val := v.Args[1] 10375 mem := v.Args[2] 10376 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10377 break 10378 } 10379 v.reset(OpAMD64MOVSSstoreidx4) 10380 v.AuxInt = off1 + off2 10381 v.Aux = mergeSym(sym1, sym2) 10382 v.AddArg(ptr) 10383 v.AddArg(idx) 10384 v.AddArg(val) 10385 v.AddArg(mem) 10386 return true 10387 } 10388 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 10389 // cond: ptr.Op != OpSB 10390 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 10391 for { 10392 off := v.AuxInt 10393 sym := v.Aux 10394 _ = v.Args[2] 10395 v_0 := v.Args[0] 10396 if v_0.Op != OpAMD64ADDQ { 10397 break 10398 } 10399 _ = v_0.Args[1] 10400 ptr := v_0.Args[0] 10401 idx := v_0.Args[1] 10402 val := v.Args[1] 10403 mem := v.Args[2] 10404 if !(ptr.Op != OpSB) { 10405 break 10406 } 10407 v.reset(OpAMD64MOVSSstoreidx1) 10408 v.AuxInt = off 10409 v.Aux = sym 10410 v.AddArg(ptr) 10411 v.AddArg(idx) 10412 v.AddArg(val) 10413 v.AddArg(mem) 10414 return true 10415 } 10416 return false 10417 } 10418 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { 10419 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 10420 // cond: 10421 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 10422 for { 10423 c := v.AuxInt 10424 sym := v.Aux 10425 _ 
= v.Args[3] 10426 ptr := v.Args[0] 10427 v_1 := v.Args[1] 10428 if v_1.Op != OpAMD64SHLQconst { 10429 break 10430 } 10431 if v_1.AuxInt != 2 { 10432 break 10433 } 10434 idx := v_1.Args[0] 10435 val := v.Args[2] 10436 mem := v.Args[3] 10437 v.reset(OpAMD64MOVSSstoreidx4) 10438 v.AuxInt = c 10439 v.Aux = sym 10440 v.AddArg(ptr) 10441 v.AddArg(idx) 10442 v.AddArg(val) 10443 v.AddArg(mem) 10444 return true 10445 } 10446 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10447 // cond: 10448 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 10449 for { 10450 c := v.AuxInt 10451 sym := v.Aux 10452 _ = v.Args[3] 10453 v_0 := v.Args[0] 10454 if v_0.Op != OpAMD64ADDQconst { 10455 break 10456 } 10457 d := v_0.AuxInt 10458 ptr := v_0.Args[0] 10459 idx := v.Args[1] 10460 val := v.Args[2] 10461 mem := v.Args[3] 10462 v.reset(OpAMD64MOVSSstoreidx1) 10463 v.AuxInt = c + d 10464 v.Aux = sym 10465 v.AddArg(ptr) 10466 v.AddArg(idx) 10467 v.AddArg(val) 10468 v.AddArg(mem) 10469 return true 10470 } 10471 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10472 // cond: 10473 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 10474 for { 10475 c := v.AuxInt 10476 sym := v.Aux 10477 _ = v.Args[3] 10478 ptr := v.Args[0] 10479 v_1 := v.Args[1] 10480 if v_1.Op != OpAMD64ADDQconst { 10481 break 10482 } 10483 d := v_1.AuxInt 10484 idx := v_1.Args[0] 10485 val := v.Args[2] 10486 mem := v.Args[3] 10487 v.reset(OpAMD64MOVSSstoreidx1) 10488 v.AuxInt = c + d 10489 v.Aux = sym 10490 v.AddArg(ptr) 10491 v.AddArg(idx) 10492 v.AddArg(val) 10493 v.AddArg(mem) 10494 return true 10495 } 10496 return false 10497 } 10498 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { 10499 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10500 // cond: 10501 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 10502 for { 10503 c := v.AuxInt 10504 sym := v.Aux 10505 _ = v.Args[3] 10506 v_0 := v.Args[0] 10507 if v_0.Op != OpAMD64ADDQconst { 10508 break 10509 } 10510 d := v_0.AuxInt 10511 ptr := v_0.Args[0] 10512 idx := v.Args[1] 10513 val := v.Args[2] 10514 mem := v.Args[3] 10515 v.reset(OpAMD64MOVSSstoreidx4) 10516 v.AuxInt = c + d 10517 v.Aux = sym 10518 v.AddArg(ptr) 10519 v.AddArg(idx) 10520 v.AddArg(val) 10521 v.AddArg(mem) 10522 return true 10523 } 10524 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10525 // cond: 10526 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 10527 for { 10528 c := v.AuxInt 10529 sym := v.Aux 10530 _ = v.Args[3] 10531 ptr := v.Args[0] 10532 v_1 := v.Args[1] 10533 if v_1.Op != OpAMD64ADDQconst { 10534 break 10535 } 10536 d := v_1.AuxInt 10537 idx := v_1.Args[0] 10538 val := v.Args[2] 10539 mem := v.Args[3] 10540 v.reset(OpAMD64MOVSSstoreidx4) 10541 v.AuxInt = c + 4*d 10542 v.Aux = sym 10543 v.AddArg(ptr) 10544 v.AddArg(idx) 10545 v.AddArg(val) 10546 v.AddArg(mem) 10547 return true 10548 } 10549 return false 10550 } 10551 func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { 10552 b := v.Block 10553 _ = b 10554 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 10555 // cond: x.Uses == 1 && clobber(x) 10556 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 10557 for { 10558 x := v.Args[0] 10559 if x.Op != OpAMD64MOVWload { 10560 break 10561 } 10562 off := x.AuxInt 10563 sym := x.Aux 10564 _ = x.Args[1] 10565 ptr := x.Args[0] 10566 mem := x.Args[1] 10567 if !(x.Uses == 1 && clobber(x)) { 10568 break 10569 } 10570 b = x.Block 10571 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 
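// This implements the "@x.Block" result form: the replacement
// sign-extending load is created in the block of the original load x
// (b = x.Block), v is reduced to a copy of it, and the
// x.Uses == 1 && clobber(x) condition ensures the now-dead MOVWload has
// no other consumers.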
10572 v.reset(OpCopy) 10573 v.AddArg(v0) 10574 v0.AuxInt = off 10575 v0.Aux = sym 10576 v0.AddArg(ptr) 10577 v0.AddArg(mem) 10578 return true 10579 } 10580 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 10581 // cond: x.Uses == 1 && clobber(x) 10582 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 10583 for { 10584 x := v.Args[0] 10585 if x.Op != OpAMD64MOVLload { 10586 break 10587 } 10588 off := x.AuxInt 10589 sym := x.Aux 10590 _ = x.Args[1] 10591 ptr := x.Args[0] 10592 mem := x.Args[1] 10593 if !(x.Uses == 1 && clobber(x)) { 10594 break 10595 } 10596 b = x.Block 10597 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 10598 v.reset(OpCopy) 10599 v.AddArg(v0) 10600 v0.AuxInt = off 10601 v0.Aux = sym 10602 v0.AddArg(ptr) 10603 v0.AddArg(mem) 10604 return true 10605 } 10606 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 10607 // cond: x.Uses == 1 && clobber(x) 10608 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 10609 for { 10610 x := v.Args[0] 10611 if x.Op != OpAMD64MOVQload { 10612 break 10613 } 10614 off := x.AuxInt 10615 sym := x.Aux 10616 _ = x.Args[1] 10617 ptr := x.Args[0] 10618 mem := x.Args[1] 10619 if !(x.Uses == 1 && clobber(x)) { 10620 break 10621 } 10622 b = x.Block 10623 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 10624 v.reset(OpCopy) 10625 v.AddArg(v0) 10626 v0.AuxInt = off 10627 v0.Aux = sym 10628 v0.AddArg(ptr) 10629 v0.AddArg(mem) 10630 return true 10631 } 10632 // match: (MOVWQSX (ANDLconst [c] x)) 10633 // cond: c & 0x8000 == 0 10634 // result: (ANDLconst [c & 0x7fff] x) 10635 for { 10636 v_0 := v.Args[0] 10637 if v_0.Op != OpAMD64ANDLconst { 10638 break 10639 } 10640 c := v_0.AuxInt 10641 x := v_0.Args[0] 10642 if !(c&0x8000 == 0) { 10643 break 10644 } 10645 v.reset(OpAMD64ANDLconst) 10646 v.AuxInt = c & 0x7fff 10647 v.AddArg(x) 10648 return true 10649 } 10650 // match: (MOVWQSX x:(MOVWQSX _)) 10651 // cond: 10652 // result: x 10653 for { 10654 x := v.Args[0] 10655 if x.Op != OpAMD64MOVWQSX { 10656 break 10657 } 10658 v.reset(OpCopy) 10659 v.Type = x.Type 10660 v.AddArg(x) 10661 return true 10662 } 10663 // match: (MOVWQSX x:(MOVBQSX _)) 10664 // cond: 10665 // result: x 10666 for { 10667 x := v.Args[0] 10668 if x.Op != OpAMD64MOVBQSX { 10669 break 10670 } 10671 v.reset(OpCopy) 10672 v.Type = x.Type 10673 v.AddArg(x) 10674 return true 10675 } 10676 return false 10677 } 10678 func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { 10679 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 10680 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 10681 // result: (MOVWQSX x) 10682 for { 10683 off := v.AuxInt 10684 sym := v.Aux 10685 _ = v.Args[1] 10686 ptr := v.Args[0] 10687 v_1 := v.Args[1] 10688 if v_1.Op != OpAMD64MOVWstore { 10689 break 10690 } 10691 off2 := v_1.AuxInt 10692 sym2 := v_1.Aux 10693 _ = v_1.Args[2] 10694 ptr2 := v_1.Args[0] 10695 x := v_1.Args[1] 10696 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 10697 break 10698 } 10699 v.reset(OpAMD64MOVWQSX) 10700 v.AddArg(x) 10701 return true 10702 } 10703 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10704 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10705 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10706 for { 10707 off1 := v.AuxInt 10708 sym1 := v.Aux 10709 _ = v.Args[1] 10710 v_0 := v.Args[0] 10711 if v_0.Op != OpAMD64LEAQ { 10712 break 10713 } 10714 off2 := v_0.AuxInt 10715 sym2 := v_0.Aux 10716 base := v_0.Args[0] 10717 mem := v.Args[1] 10718 if 
!(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10719 break 10720 } 10721 v.reset(OpAMD64MOVWQSXload) 10722 v.AuxInt = off1 + off2 10723 v.Aux = mergeSym(sym1, sym2) 10724 v.AddArg(base) 10725 v.AddArg(mem) 10726 return true 10727 } 10728 return false 10729 } 10730 func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { 10731 b := v.Block 10732 _ = b 10733 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 10734 // cond: x.Uses == 1 && clobber(x) 10735 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10736 for { 10737 x := v.Args[0] 10738 if x.Op != OpAMD64MOVWload { 10739 break 10740 } 10741 off := x.AuxInt 10742 sym := x.Aux 10743 _ = x.Args[1] 10744 ptr := x.Args[0] 10745 mem := x.Args[1] 10746 if !(x.Uses == 1 && clobber(x)) { 10747 break 10748 } 10749 b = x.Block 10750 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10751 v.reset(OpCopy) 10752 v.AddArg(v0) 10753 v0.AuxInt = off 10754 v0.Aux = sym 10755 v0.AddArg(ptr) 10756 v0.AddArg(mem) 10757 return true 10758 } 10759 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 10760 // cond: x.Uses == 1 && clobber(x) 10761 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10762 for { 10763 x := v.Args[0] 10764 if x.Op != OpAMD64MOVLload { 10765 break 10766 } 10767 off := x.AuxInt 10768 sym := x.Aux 10769 _ = x.Args[1] 10770 ptr := x.Args[0] 10771 mem := x.Args[1] 10772 if !(x.Uses == 1 && clobber(x)) { 10773 break 10774 } 10775 b = x.Block 10776 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10777 v.reset(OpCopy) 10778 v.AddArg(v0) 10779 v0.AuxInt = off 10780 v0.Aux = sym 10781 v0.AddArg(ptr) 10782 v0.AddArg(mem) 10783 return true 10784 } 10785 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 10786 // cond: x.Uses == 1 && clobber(x) 10787 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10788 for { 10789 x := v.Args[0] 10790 if x.Op != OpAMD64MOVQload { 10791 break 10792 } 10793 off := x.AuxInt 10794 sym := x.Aux 10795 _ = x.Args[1] 10796 ptr := x.Args[0] 10797 mem := x.Args[1] 10798 if !(x.Uses == 1 && clobber(x)) { 10799 break 10800 } 10801 b = x.Block 10802 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10803 v.reset(OpCopy) 10804 v.AddArg(v0) 10805 v0.AuxInt = off 10806 v0.Aux = sym 10807 v0.AddArg(ptr) 10808 v0.AddArg(mem) 10809 return true 10810 } 10811 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 10812 // cond: x.Uses == 1 && clobber(x) 10813 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 10814 for { 10815 x := v.Args[0] 10816 if x.Op != OpAMD64MOVWloadidx1 { 10817 break 10818 } 10819 off := x.AuxInt 10820 sym := x.Aux 10821 _ = x.Args[2] 10822 ptr := x.Args[0] 10823 idx := x.Args[1] 10824 mem := x.Args[2] 10825 if !(x.Uses == 1 && clobber(x)) { 10826 break 10827 } 10828 b = x.Block 10829 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 10830 v.reset(OpCopy) 10831 v.AddArg(v0) 10832 v0.AuxInt = off 10833 v0.Aux = sym 10834 v0.AddArg(ptr) 10835 v0.AddArg(idx) 10836 v0.AddArg(mem) 10837 return true 10838 } 10839 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 10840 // cond: x.Uses == 1 && clobber(x) 10841 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 10842 for { 10843 x := v.Args[0] 10844 if x.Op != OpAMD64MOVWloadidx2 { 10845 break 10846 } 10847 off := x.AuxInt 10848 sym := x.Aux 10849 _ = x.Args[2] 10850 ptr := x.Args[0] 10851 idx := x.Args[1] 10852 mem := x.Args[2] 10853 if !(x.Uses == 1 && clobber(x)) { 10854 break 10855 } 10856 b = x.Block 10857 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 10858 
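// Dropping the zero-extension is sound because the 16-bit load ops
// already zero-extend into the full register; recreating the load with
// v's type therefore yields the MOVWQZX result directly.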
v.reset(OpCopy) 10859 v.AddArg(v0) 10860 v0.AuxInt = off 10861 v0.Aux = sym 10862 v0.AddArg(ptr) 10863 v0.AddArg(idx) 10864 v0.AddArg(mem) 10865 return true 10866 } 10867 // match: (MOVWQZX (ANDLconst [c] x)) 10868 // cond: 10869 // result: (ANDLconst [c & 0xffff] x) 10870 for { 10871 v_0 := v.Args[0] 10872 if v_0.Op != OpAMD64ANDLconst { 10873 break 10874 } 10875 c := v_0.AuxInt 10876 x := v_0.Args[0] 10877 v.reset(OpAMD64ANDLconst) 10878 v.AuxInt = c & 0xffff 10879 v.AddArg(x) 10880 return true 10881 } 10882 // match: (MOVWQZX x:(MOVWQZX _)) 10883 // cond: 10884 // result: x 10885 for { 10886 x := v.Args[0] 10887 if x.Op != OpAMD64MOVWQZX { 10888 break 10889 } 10890 v.reset(OpCopy) 10891 v.Type = x.Type 10892 v.AddArg(x) 10893 return true 10894 } 10895 // match: (MOVWQZX x:(MOVBQZX _)) 10896 // cond: 10897 // result: x 10898 for { 10899 x := v.Args[0] 10900 if x.Op != OpAMD64MOVBQZX { 10901 break 10902 } 10903 v.reset(OpCopy) 10904 v.Type = x.Type 10905 v.AddArg(x) 10906 return true 10907 } 10908 return false 10909 } 10910 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 10911 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 10912 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 10913 // result: (MOVWQZX x) 10914 for { 10915 off := v.AuxInt 10916 sym := v.Aux 10917 _ = v.Args[1] 10918 ptr := v.Args[0] 10919 v_1 := v.Args[1] 10920 if v_1.Op != OpAMD64MOVWstore { 10921 break 10922 } 10923 off2 := v_1.AuxInt 10924 sym2 := v_1.Aux 10925 _ = v_1.Args[2] 10926 ptr2 := v_1.Args[0] 10927 x := v_1.Args[1] 10928 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 10929 break 10930 } 10931 v.reset(OpAMD64MOVWQZX) 10932 v.AddArg(x) 10933 return true 10934 } 10935 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 10936 // cond: is32Bit(off1+off2) 10937 // result: (MOVWload [off1+off2] {sym} ptr mem) 10938 for { 10939 off1 := v.AuxInt 10940 sym := v.Aux 10941 _ = v.Args[1] 10942 v_0 := v.Args[0] 10943 if v_0.Op != OpAMD64ADDQconst { 10944 break 10945 } 10946 off2 := v_0.AuxInt 10947 ptr := v_0.Args[0] 10948 mem := v.Args[1] 10949 if !(is32Bit(off1 + off2)) { 10950 break 10951 } 10952 v.reset(OpAMD64MOVWload) 10953 v.AuxInt = off1 + off2 10954 v.Aux = sym 10955 v.AddArg(ptr) 10956 v.AddArg(mem) 10957 return true 10958 } 10959 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10960 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10961 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10962 for { 10963 off1 := v.AuxInt 10964 sym1 := v.Aux 10965 _ = v.Args[1] 10966 v_0 := v.Args[0] 10967 if v_0.Op != OpAMD64LEAQ { 10968 break 10969 } 10970 off2 := v_0.AuxInt 10971 sym2 := v_0.Aux 10972 base := v_0.Args[0] 10973 mem := v.Args[1] 10974 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10975 break 10976 } 10977 v.reset(OpAMD64MOVWload) 10978 v.AuxInt = off1 + off2 10979 v.Aux = mergeSym(sym1, sym2) 10980 v.AddArg(base) 10981 v.AddArg(mem) 10982 return true 10983 } 10984 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 10985 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10986 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10987 for { 10988 off1 := v.AuxInt 10989 sym1 := v.Aux 10990 _ = v.Args[1] 10991 v_0 := v.Args[0] 10992 if v_0.Op != OpAMD64LEAQ1 { 10993 break 10994 } 10995 off2 := v_0.AuxInt 10996 sym2 := v_0.Aux 10997 _ = v_0.Args[1] 10998 ptr := v_0.Args[0] 10999 idx := v_0.Args[1] 11000 mem := v.Args[1] 11001 if !(is32Bit(off1+off2) && 
canMergeSym(sym1, sym2)) { 11002 break 11003 } 11004 v.reset(OpAMD64MOVWloadidx1) 11005 v.AuxInt = off1 + off2 11006 v.Aux = mergeSym(sym1, sym2) 11007 v.AddArg(ptr) 11008 v.AddArg(idx) 11009 v.AddArg(mem) 11010 return true 11011 } 11012 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 11013 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11014 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 11015 for { 11016 off1 := v.AuxInt 11017 sym1 := v.Aux 11018 _ = v.Args[1] 11019 v_0 := v.Args[0] 11020 if v_0.Op != OpAMD64LEAQ2 { 11021 break 11022 } 11023 off2 := v_0.AuxInt 11024 sym2 := v_0.Aux 11025 _ = v_0.Args[1] 11026 ptr := v_0.Args[0] 11027 idx := v_0.Args[1] 11028 mem := v.Args[1] 11029 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11030 break 11031 } 11032 v.reset(OpAMD64MOVWloadidx2) 11033 v.AuxInt = off1 + off2 11034 v.Aux = mergeSym(sym1, sym2) 11035 v.AddArg(ptr) 11036 v.AddArg(idx) 11037 v.AddArg(mem) 11038 return true 11039 } 11040 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 11041 // cond: ptr.Op != OpSB 11042 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 11043 for { 11044 off := v.AuxInt 11045 sym := v.Aux 11046 _ = v.Args[1] 11047 v_0 := v.Args[0] 11048 if v_0.Op != OpAMD64ADDQ { 11049 break 11050 } 11051 _ = v_0.Args[1] 11052 ptr := v_0.Args[0] 11053 idx := v_0.Args[1] 11054 mem := v.Args[1] 11055 if !(ptr.Op != OpSB) { 11056 break 11057 } 11058 v.reset(OpAMD64MOVWloadidx1) 11059 v.AuxInt = off 11060 v.Aux = sym 11061 v.AddArg(ptr) 11062 v.AddArg(idx) 11063 v.AddArg(mem) 11064 return true 11065 } 11066 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 11067 // cond: canMergeSym(sym1, sym2) 11068 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 11069 for { 11070 off1 := v.AuxInt 11071 sym1 := v.Aux 11072 _ = v.Args[1] 11073 v_0 := v.Args[0] 11074 if v_0.Op != OpAMD64LEAL { 11075 break 11076 } 11077 off2 := v_0.AuxInt 11078 sym2 := v_0.Aux 11079 base := v_0.Args[0] 11080 mem := v.Args[1] 11081 if !(canMergeSym(sym1, sym2)) { 11082 break 11083 } 11084 v.reset(OpAMD64MOVWload) 11085 v.AuxInt = off1 + off2 11086 v.Aux = mergeSym(sym1, sym2) 11087 v.AddArg(base) 11088 v.AddArg(mem) 11089 return true 11090 } 11091 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 11092 // cond: is32Bit(off1+off2) 11093 // result: (MOVWload [off1+off2] {sym} ptr mem) 11094 for { 11095 off1 := v.AuxInt 11096 sym := v.Aux 11097 _ = v.Args[1] 11098 v_0 := v.Args[0] 11099 if v_0.Op != OpAMD64ADDLconst { 11100 break 11101 } 11102 off2 := v_0.AuxInt 11103 ptr := v_0.Args[0] 11104 mem := v.Args[1] 11105 if !(is32Bit(off1 + off2)) { 11106 break 11107 } 11108 v.reset(OpAMD64MOVWload) 11109 v.AuxInt = off1 + off2 11110 v.Aux = sym 11111 v.AddArg(ptr) 11112 v.AddArg(mem) 11113 return true 11114 } 11115 return false 11116 } 11117 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 11118 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 11119 // cond: 11120 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 11121 for { 11122 c := v.AuxInt 11123 sym := v.Aux 11124 _ = v.Args[2] 11125 ptr := v.Args[0] 11126 v_1 := v.Args[1] 11127 if v_1.Op != OpAMD64SHLQconst { 11128 break 11129 } 11130 if v_1.AuxInt != 1 { 11131 break 11132 } 11133 idx := v_1.Args[0] 11134 mem := v.Args[2] 11135 v.reset(OpAMD64MOVWloadidx2) 11136 v.AuxInt = c 11137 v.Aux = sym 11138 v.AddArg(ptr) 11139 v.AddArg(idx) 11140 v.AddArg(mem) 11141 return true 11142 } 11143 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) 
ptr mem) 11144 // cond: 11145 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 11146 for { 11147 c := v.AuxInt 11148 sym := v.Aux 11149 _ = v.Args[2] 11150 v_0 := v.Args[0] 11151 if v_0.Op != OpAMD64SHLQconst { 11152 break 11153 } 11154 if v_0.AuxInt != 1 { 11155 break 11156 } 11157 idx := v_0.Args[0] 11158 ptr := v.Args[1] 11159 mem := v.Args[2] 11160 v.reset(OpAMD64MOVWloadidx2) 11161 v.AuxInt = c 11162 v.Aux = sym 11163 v.AddArg(ptr) 11164 v.AddArg(idx) 11165 v.AddArg(mem) 11166 return true 11167 } 11168 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 11169 // cond: 11170 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 11171 for { 11172 c := v.AuxInt 11173 sym := v.Aux 11174 _ = v.Args[2] 11175 v_0 := v.Args[0] 11176 if v_0.Op != OpAMD64ADDQconst { 11177 break 11178 } 11179 d := v_0.AuxInt 11180 ptr := v_0.Args[0] 11181 idx := v.Args[1] 11182 mem := v.Args[2] 11183 v.reset(OpAMD64MOVWloadidx1) 11184 v.AuxInt = c + d 11185 v.Aux = sym 11186 v.AddArg(ptr) 11187 v.AddArg(idx) 11188 v.AddArg(mem) 11189 return true 11190 } 11191 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 11192 // cond: 11193 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 11194 for { 11195 c := v.AuxInt 11196 sym := v.Aux 11197 _ = v.Args[2] 11198 idx := v.Args[0] 11199 v_1 := v.Args[1] 11200 if v_1.Op != OpAMD64ADDQconst { 11201 break 11202 } 11203 d := v_1.AuxInt 11204 ptr := v_1.Args[0] 11205 mem := v.Args[2] 11206 v.reset(OpAMD64MOVWloadidx1) 11207 v.AuxInt = c + d 11208 v.Aux = sym 11209 v.AddArg(ptr) 11210 v.AddArg(idx) 11211 v.AddArg(mem) 11212 return true 11213 } 11214 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 11215 // cond: 11216 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 11217 for { 11218 c := v.AuxInt 11219 sym := v.Aux 11220 _ = v.Args[2] 11221 ptr := v.Args[0] 11222 v_1 := v.Args[1] 11223 if v_1.Op != OpAMD64ADDQconst { 11224 break 11225 } 11226 d := v_1.AuxInt 11227 idx := v_1.Args[0] 11228 mem := v.Args[2] 11229 v.reset(OpAMD64MOVWloadidx1) 11230 v.AuxInt = c + d 11231 v.Aux = sym 11232 v.AddArg(ptr) 11233 v.AddArg(idx) 11234 v.AddArg(mem) 11235 return true 11236 } 11237 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 11238 // cond: 11239 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 11240 for { 11241 c := v.AuxInt 11242 sym := v.Aux 11243 _ = v.Args[2] 11244 v_0 := v.Args[0] 11245 if v_0.Op != OpAMD64ADDQconst { 11246 break 11247 } 11248 d := v_0.AuxInt 11249 idx := v_0.Args[0] 11250 ptr := v.Args[1] 11251 mem := v.Args[2] 11252 v.reset(OpAMD64MOVWloadidx1) 11253 v.AuxInt = c + d 11254 v.Aux = sym 11255 v.AddArg(ptr) 11256 v.AddArg(idx) 11257 v.AddArg(mem) 11258 return true 11259 } 11260 return false 11261 } 11262 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 11263 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 11264 // cond: 11265 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 11266 for { 11267 c := v.AuxInt 11268 sym := v.Aux 11269 _ = v.Args[2] 11270 v_0 := v.Args[0] 11271 if v_0.Op != OpAMD64ADDQconst { 11272 break 11273 } 11274 d := v_0.AuxInt 11275 ptr := v_0.Args[0] 11276 idx := v.Args[1] 11277 mem := v.Args[2] 11278 v.reset(OpAMD64MOVWloadidx2) 11279 v.AuxInt = c + d 11280 v.Aux = sym 11281 v.AddArg(ptr) 11282 v.AddArg(idx) 11283 v.AddArg(mem) 11284 return true 11285 } 11286 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 11287 // cond: 11288 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 11289 for { 11290 c := v.AuxInt 11291 sym := v.Aux 11292 _ = v.Args[2] 11293 
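// As in the idx4 and idx8 forms earlier, the folded displacement scales
// with the element size: idx2 computes ptr + 2*idx + AuxInt, so
// (ADDQconst [d] idx) contributes 2*d rather than d.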
ptr := v.Args[0] 11294 v_1 := v.Args[1] 11295 if v_1.Op != OpAMD64ADDQconst { 11296 break 11297 } 11298 d := v_1.AuxInt 11299 idx := v_1.Args[0] 11300 mem := v.Args[2] 11301 v.reset(OpAMD64MOVWloadidx2) 11302 v.AuxInt = c + 2*d 11303 v.Aux = sym 11304 v.AddArg(ptr) 11305 v.AddArg(idx) 11306 v.AddArg(mem) 11307 return true 11308 } 11309 return false 11310 } 11311 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 11312 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 11313 // cond: 11314 // result: (MOVWstore [off] {sym} ptr x mem) 11315 for { 11316 off := v.AuxInt 11317 sym := v.Aux 11318 _ = v.Args[2] 11319 ptr := v.Args[0] 11320 v_1 := v.Args[1] 11321 if v_1.Op != OpAMD64MOVWQSX { 11322 break 11323 } 11324 x := v_1.Args[0] 11325 mem := v.Args[2] 11326 v.reset(OpAMD64MOVWstore) 11327 v.AuxInt = off 11328 v.Aux = sym 11329 v.AddArg(ptr) 11330 v.AddArg(x) 11331 v.AddArg(mem) 11332 return true 11333 } 11334 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 11335 // cond: 11336 // result: (MOVWstore [off] {sym} ptr x mem) 11337 for { 11338 off := v.AuxInt 11339 sym := v.Aux 11340 _ = v.Args[2] 11341 ptr := v.Args[0] 11342 v_1 := v.Args[1] 11343 if v_1.Op != OpAMD64MOVWQZX { 11344 break 11345 } 11346 x := v_1.Args[0] 11347 mem := v.Args[2] 11348 v.reset(OpAMD64MOVWstore) 11349 v.AuxInt = off 11350 v.Aux = sym 11351 v.AddArg(ptr) 11352 v.AddArg(x) 11353 v.AddArg(mem) 11354 return true 11355 } 11356 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 11357 // cond: is32Bit(off1+off2) 11358 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 11359 for { 11360 off1 := v.AuxInt 11361 sym := v.Aux 11362 _ = v.Args[2] 11363 v_0 := v.Args[0] 11364 if v_0.Op != OpAMD64ADDQconst { 11365 break 11366 } 11367 off2 := v_0.AuxInt 11368 ptr := v_0.Args[0] 11369 val := v.Args[1] 11370 mem := v.Args[2] 11371 if !(is32Bit(off1 + off2)) { 11372 break 11373 } 11374 v.reset(OpAMD64MOVWstore) 11375 v.AuxInt = off1 + off2 11376 v.Aux = sym 11377 v.AddArg(ptr) 11378 v.AddArg(val) 11379 v.AddArg(mem) 11380 return true 11381 } 11382 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 11383 // cond: validOff(off) 11384 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 11385 for { 11386 off := v.AuxInt 11387 sym := v.Aux 11388 _ = v.Args[2] 11389 ptr := v.Args[0] 11390 v_1 := v.Args[1] 11391 if v_1.Op != OpAMD64MOVLconst { 11392 break 11393 } 11394 c := v_1.AuxInt 11395 mem := v.Args[2] 11396 if !(validOff(off)) { 11397 break 11398 } 11399 v.reset(OpAMD64MOVWstoreconst) 11400 v.AuxInt = makeValAndOff(int64(int16(c)), off) 11401 v.Aux = sym 11402 v.AddArg(ptr) 11403 v.AddArg(mem) 11404 return true 11405 } 11406 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11407 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11408 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11409 for { 11410 off1 := v.AuxInt 11411 sym1 := v.Aux 11412 _ = v.Args[2] 11413 v_0 := v.Args[0] 11414 if v_0.Op != OpAMD64LEAQ { 11415 break 11416 } 11417 off2 := v_0.AuxInt 11418 sym2 := v_0.Aux 11419 base := v_0.Args[0] 11420 val := v.Args[1] 11421 mem := v.Args[2] 11422 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11423 break 11424 } 11425 v.reset(OpAMD64MOVWstore) 11426 v.AuxInt = off1 + off2 11427 v.Aux = mergeSym(sym1, sym2) 11428 v.AddArg(base) 11429 v.AddArg(val) 11430 v.AddArg(mem) 11431 return true 11432 } 11433 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 11434 // cond: is32Bit(off1+off2) && 
canMergeSym(sym1, sym2) 11435 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11436 for { 11437 off1 := v.AuxInt 11438 sym1 := v.Aux 11439 _ = v.Args[2] 11440 v_0 := v.Args[0] 11441 if v_0.Op != OpAMD64LEAQ1 { 11442 break 11443 } 11444 off2 := v_0.AuxInt 11445 sym2 := v_0.Aux 11446 _ = v_0.Args[1] 11447 ptr := v_0.Args[0] 11448 idx := v_0.Args[1] 11449 val := v.Args[1] 11450 mem := v.Args[2] 11451 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11452 break 11453 } 11454 v.reset(OpAMD64MOVWstoreidx1) 11455 v.AuxInt = off1 + off2 11456 v.Aux = mergeSym(sym1, sym2) 11457 v.AddArg(ptr) 11458 v.AddArg(idx) 11459 v.AddArg(val) 11460 v.AddArg(mem) 11461 return true 11462 } 11463 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 11464 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11465 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11466 for { 11467 off1 := v.AuxInt 11468 sym1 := v.Aux 11469 _ = v.Args[2] 11470 v_0 := v.Args[0] 11471 if v_0.Op != OpAMD64LEAQ2 { 11472 break 11473 } 11474 off2 := v_0.AuxInt 11475 sym2 := v_0.Aux 11476 _ = v_0.Args[1] 11477 ptr := v_0.Args[0] 11478 idx := v_0.Args[1] 11479 val := v.Args[1] 11480 mem := v.Args[2] 11481 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11482 break 11483 } 11484 v.reset(OpAMD64MOVWstoreidx2) 11485 v.AuxInt = off1 + off2 11486 v.Aux = mergeSym(sym1, sym2) 11487 v.AddArg(ptr) 11488 v.AddArg(idx) 11489 v.AddArg(val) 11490 v.AddArg(mem) 11491 return true 11492 } 11493 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 11494 // cond: ptr.Op != OpSB 11495 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 11496 for { 11497 off := v.AuxInt 11498 sym := v.Aux 11499 _ = v.Args[2] 11500 v_0 := v.Args[0] 11501 if v_0.Op != OpAMD64ADDQ { 11502 break 11503 } 11504 _ = v_0.Args[1] 11505 ptr := v_0.Args[0] 11506 idx := v_0.Args[1] 11507 val := v.Args[1] 11508 mem := v.Args[2] 11509 if !(ptr.Op != OpSB) { 11510 break 11511 } 11512 v.reset(OpAMD64MOVWstoreidx1) 11513 v.AuxInt = off 11514 v.Aux = sym 11515 v.AddArg(ptr) 11516 v.AddArg(idx) 11517 v.AddArg(val) 11518 v.AddArg(mem) 11519 return true 11520 } 11521 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 11522 // cond: x.Uses == 1 && clobber(x) 11523 // result: (MOVLstore [i-2] {s} p w mem) 11524 for { 11525 i := v.AuxInt 11526 s := v.Aux 11527 _ = v.Args[2] 11528 p := v.Args[0] 11529 v_1 := v.Args[1] 11530 if v_1.Op != OpAMD64SHRQconst { 11531 break 11532 } 11533 if v_1.AuxInt != 16 { 11534 break 11535 } 11536 w := v_1.Args[0] 11537 x := v.Args[2] 11538 if x.Op != OpAMD64MOVWstore { 11539 break 11540 } 11541 if x.AuxInt != i-2 { 11542 break 11543 } 11544 if x.Aux != s { 11545 break 11546 } 11547 _ = x.Args[2] 11548 if p != x.Args[0] { 11549 break 11550 } 11551 if w != x.Args[1] { 11552 break 11553 } 11554 mem := x.Args[2] 11555 if !(x.Uses == 1 && clobber(x)) { 11556 break 11557 } 11558 v.reset(OpAMD64MOVLstore) 11559 v.AuxInt = i - 2 11560 v.Aux = s 11561 v.AddArg(p) 11562 v.AddArg(w) 11563 v.AddArg(mem) 11564 return true 11565 } 11566 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 11567 // cond: x.Uses == 1 && clobber(x) 11568 // result: (MOVLstore [i-2] {s} p w0 mem) 11569 for { 11570 i := v.AuxInt 11571 s := v.Aux 11572 _ = v.Args[2] 11573 p := v.Args[0] 11574 v_1 := v.Args[1] 11575 if v_1.Op != OpAMD64SHRQconst { 11576 break 11577 } 11578 j := v_1.AuxInt 11579 w := v_1.Args[0] 11580 x := 
v.Args[2] 11581 if x.Op != OpAMD64MOVWstore { 11582 break 11583 } 11584 if x.AuxInt != i-2 { 11585 break 11586 } 11587 if x.Aux != s { 11588 break 11589 } 11590 _ = x.Args[2] 11591 if p != x.Args[0] { 11592 break 11593 } 11594 w0 := x.Args[1] 11595 if w0.Op != OpAMD64SHRQconst { 11596 break 11597 } 11598 if w0.AuxInt != j-16 { 11599 break 11600 } 11601 if w != w0.Args[0] { 11602 break 11603 } 11604 mem := x.Args[2] 11605 if !(x.Uses == 1 && clobber(x)) { 11606 break 11607 } 11608 v.reset(OpAMD64MOVLstore) 11609 v.AuxInt = i - 2 11610 v.Aux = s 11611 v.AddArg(p) 11612 v.AddArg(w0) 11613 v.AddArg(mem) 11614 return true 11615 } 11616 return false 11617 } 11618 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 11619 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 11620 // cond: canMergeSym(sym1, sym2) 11621 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11622 for { 11623 off1 := v.AuxInt 11624 sym1 := v.Aux 11625 _ = v.Args[2] 11626 v_0 := v.Args[0] 11627 if v_0.Op != OpAMD64LEAL { 11628 break 11629 } 11630 off2 := v_0.AuxInt 11631 sym2 := v_0.Aux 11632 base := v_0.Args[0] 11633 val := v.Args[1] 11634 mem := v.Args[2] 11635 if !(canMergeSym(sym1, sym2)) { 11636 break 11637 } 11638 v.reset(OpAMD64MOVWstore) 11639 v.AuxInt = off1 + off2 11640 v.Aux = mergeSym(sym1, sym2) 11641 v.AddArg(base) 11642 v.AddArg(val) 11643 v.AddArg(mem) 11644 return true 11645 } 11646 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 11647 // cond: is32Bit(off1+off2) 11648 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 11649 for { 11650 off1 := v.AuxInt 11651 sym := v.Aux 11652 _ = v.Args[2] 11653 v_0 := v.Args[0] 11654 if v_0.Op != OpAMD64ADDLconst { 11655 break 11656 } 11657 off2 := v_0.AuxInt 11658 ptr := v_0.Args[0] 11659 val := v.Args[1] 11660 mem := v.Args[2] 11661 if !(is32Bit(off1 + off2)) { 11662 break 11663 } 11664 v.reset(OpAMD64MOVWstore) 11665 v.AuxInt = off1 + off2 11666 v.Aux = sym 11667 v.AddArg(ptr) 11668 v.AddArg(val) 11669 v.AddArg(mem) 11670 return true 11671 } 11672 return false 11673 } 11674 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { 11675 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 11676 // cond: ValAndOff(sc).canAdd(off) 11677 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 11678 for { 11679 sc := v.AuxInt 11680 s := v.Aux 11681 _ = v.Args[1] 11682 v_0 := v.Args[0] 11683 if v_0.Op != OpAMD64ADDQconst { 11684 break 11685 } 11686 off := v_0.AuxInt 11687 ptr := v_0.Args[0] 11688 mem := v.Args[1] 11689 if !(ValAndOff(sc).canAdd(off)) { 11690 break 11691 } 11692 v.reset(OpAMD64MOVWstoreconst) 11693 v.AuxInt = ValAndOff(sc).add(off) 11694 v.Aux = s 11695 v.AddArg(ptr) 11696 v.AddArg(mem) 11697 return true 11698 } 11699 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 11700 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 11701 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 11702 for { 11703 sc := v.AuxInt 11704 sym1 := v.Aux 11705 _ = v.Args[1] 11706 v_0 := v.Args[0] 11707 if v_0.Op != OpAMD64LEAQ { 11708 break 11709 } 11710 off := v_0.AuxInt 11711 sym2 := v_0.Aux 11712 ptr := v_0.Args[0] 11713 mem := v.Args[1] 11714 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 11715 break 11716 } 11717 v.reset(OpAMD64MOVWstoreconst) 11718 v.AuxInt = ValAndOff(sc).add(off) 11719 v.Aux = mergeSym(sym1, sym2) 11720 v.AddArg(ptr) 11721 v.AddArg(mem) 11722 return true 11723 } 11724 // match: (MOVWstoreconst 
[x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 11725 // cond: canMergeSym(sym1, sym2) 11726 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 11727 for { 11728 x := v.AuxInt 11729 sym1 := v.Aux 11730 _ = v.Args[1] 11731 v_0 := v.Args[0] 11732 if v_0.Op != OpAMD64LEAQ1 { 11733 break 11734 } 11735 off := v_0.AuxInt 11736 sym2 := v_0.Aux 11737 _ = v_0.Args[1] 11738 ptr := v_0.Args[0] 11739 idx := v_0.Args[1] 11740 mem := v.Args[1] 11741 if !(canMergeSym(sym1, sym2)) { 11742 break 11743 } 11744 v.reset(OpAMD64MOVWstoreconstidx1) 11745 v.AuxInt = ValAndOff(x).add(off) 11746 v.Aux = mergeSym(sym1, sym2) 11747 v.AddArg(ptr) 11748 v.AddArg(idx) 11749 v.AddArg(mem) 11750 return true 11751 } 11752 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 11753 // cond: canMergeSym(sym1, sym2) 11754 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 11755 for { 11756 x := v.AuxInt 11757 sym1 := v.Aux 11758 _ = v.Args[1] 11759 v_0 := v.Args[0] 11760 if v_0.Op != OpAMD64LEAQ2 { 11761 break 11762 } 11763 off := v_0.AuxInt 11764 sym2 := v_0.Aux 11765 _ = v_0.Args[1] 11766 ptr := v_0.Args[0] 11767 idx := v_0.Args[1] 11768 mem := v.Args[1] 11769 if !(canMergeSym(sym1, sym2)) { 11770 break 11771 } 11772 v.reset(OpAMD64MOVWstoreconstidx2) 11773 v.AuxInt = ValAndOff(x).add(off) 11774 v.Aux = mergeSym(sym1, sym2) 11775 v.AddArg(ptr) 11776 v.AddArg(idx) 11777 v.AddArg(mem) 11778 return true 11779 } 11780 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 11781 // cond: 11782 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 11783 for { 11784 x := v.AuxInt 11785 sym := v.Aux 11786 _ = v.Args[1] 11787 v_0 := v.Args[0] 11788 if v_0.Op != OpAMD64ADDQ { 11789 break 11790 } 11791 _ = v_0.Args[1] 11792 ptr := v_0.Args[0] 11793 idx := v_0.Args[1] 11794 mem := v.Args[1] 11795 v.reset(OpAMD64MOVWstoreconstidx1) 11796 v.AuxInt = x 11797 v.Aux = sym 11798 v.AddArg(ptr) 11799 v.AddArg(idx) 11800 v.AddArg(mem) 11801 return true 11802 } 11803 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 11804 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 11805 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 11806 for { 11807 c := v.AuxInt 11808 s := v.Aux 11809 _ = v.Args[1] 11810 p := v.Args[0] 11811 x := v.Args[1] 11812 if x.Op != OpAMD64MOVWstoreconst { 11813 break 11814 } 11815 a := x.AuxInt 11816 if x.Aux != s { 11817 break 11818 } 11819 _ = x.Args[1] 11820 if p != x.Args[0] { 11821 break 11822 } 11823 mem := x.Args[1] 11824 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 11825 break 11826 } 11827 v.reset(OpAMD64MOVLstoreconst) 11828 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 11829 v.Aux = s 11830 v.AddArg(p) 11831 v.AddArg(mem) 11832 return true 11833 } 11834 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 11835 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 11836 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 11837 for { 11838 sc := v.AuxInt 11839 sym1 := v.Aux 11840 _ = v.Args[1] 11841 v_0 := v.Args[0] 11842 if v_0.Op != OpAMD64LEAL { 11843 break 11844 } 11845 off := v_0.AuxInt 11846 sym2 := v_0.Aux 11847 ptr := v_0.Args[0] 11848 mem := v.Args[1] 11849 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 11850 break 11851 } 11852 
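// In the storeconst forms, AuxInt holds a ValAndOff: a constant value and
// an offset packed into a single int64. canAdd(off) checks that the
// adjusted offset still fits its 32-bit half, and add(off) returns the
// repacked pair.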
v.reset(OpAMD64MOVWstoreconst) 11853 v.AuxInt = ValAndOff(sc).add(off) 11854 v.Aux = mergeSym(sym1, sym2) 11855 v.AddArg(ptr) 11856 v.AddArg(mem) 11857 return true 11858 } 11859 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 11860 // cond: ValAndOff(sc).canAdd(off) 11861 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 11862 for { 11863 sc := v.AuxInt 11864 s := v.Aux 11865 _ = v.Args[1] 11866 v_0 := v.Args[0] 11867 if v_0.Op != OpAMD64ADDLconst { 11868 break 11869 } 11870 off := v_0.AuxInt 11871 ptr := v_0.Args[0] 11872 mem := v.Args[1] 11873 if !(ValAndOff(sc).canAdd(off)) { 11874 break 11875 } 11876 v.reset(OpAMD64MOVWstoreconst) 11877 v.AuxInt = ValAndOff(sc).add(off) 11878 v.Aux = s 11879 v.AddArg(ptr) 11880 v.AddArg(mem) 11881 return true 11882 } 11883 return false 11884 } 11885 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { 11886 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 11887 // cond: 11888 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 11889 for { 11890 c := v.AuxInt 11891 sym := v.Aux 11892 _ = v.Args[2] 11893 ptr := v.Args[0] 11894 v_1 := v.Args[1] 11895 if v_1.Op != OpAMD64SHLQconst { 11896 break 11897 } 11898 if v_1.AuxInt != 1 { 11899 break 11900 } 11901 idx := v_1.Args[0] 11902 mem := v.Args[2] 11903 v.reset(OpAMD64MOVWstoreconstidx2) 11904 v.AuxInt = c 11905 v.Aux = sym 11906 v.AddArg(ptr) 11907 v.AddArg(idx) 11908 v.AddArg(mem) 11909 return true 11910 } 11911 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 11912 // cond: 11913 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 11914 for { 11915 x := v.AuxInt 11916 sym := v.Aux 11917 _ = v.Args[2] 11918 v_0 := v.Args[0] 11919 if v_0.Op != OpAMD64ADDQconst { 11920 break 11921 } 11922 c := v_0.AuxInt 11923 ptr := v_0.Args[0] 11924 idx := v.Args[1] 11925 mem := v.Args[2] 11926 v.reset(OpAMD64MOVWstoreconstidx1) 11927 v.AuxInt = ValAndOff(x).add(c) 11928 v.Aux = sym 11929 v.AddArg(ptr) 11930 v.AddArg(idx) 11931 v.AddArg(mem) 11932 return true 11933 } 11934 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 11935 // cond: 11936 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 11937 for { 11938 x := v.AuxInt 11939 sym := v.Aux 11940 _ = v.Args[2] 11941 ptr := v.Args[0] 11942 v_1 := v.Args[1] 11943 if v_1.Op != OpAMD64ADDQconst { 11944 break 11945 } 11946 c := v_1.AuxInt 11947 idx := v_1.Args[0] 11948 mem := v.Args[2] 11949 v.reset(OpAMD64MOVWstoreconstidx1) 11950 v.AuxInt = ValAndOff(x).add(c) 11951 v.Aux = sym 11952 v.AddArg(ptr) 11953 v.AddArg(idx) 11954 v.AddArg(mem) 11955 return true 11956 } 11957 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 11958 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 11959 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 11960 for { 11961 c := v.AuxInt 11962 s := v.Aux 11963 _ = v.Args[2] 11964 p := v.Args[0] 11965 i := v.Args[1] 11966 x := v.Args[2] 11967 if x.Op != OpAMD64MOVWstoreconstidx1 { 11968 break 11969 } 11970 a := x.AuxInt 11971 if x.Aux != s { 11972 break 11973 } 11974 _ = x.Args[2] 11975 if p != x.Args[0] { 11976 break 11977 } 11978 if i != x.Args[1] { 11979 break 11980 } 11981 mem := x.Args[2] 11982 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 11983 break 11984 } 11985 v.reset(OpAMD64MOVLstoreconstidx1) 11986 v.AuxInt = 
makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 11987 v.Aux = s 11988 v.AddArg(p) 11989 v.AddArg(i) 11990 v.AddArg(mem) 11991 return true 11992 } 11993 return false 11994 } 11995 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { 11996 b := v.Block 11997 _ = b 11998 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 11999 // cond: 12000 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 12001 for { 12002 x := v.AuxInt 12003 sym := v.Aux 12004 _ = v.Args[2] 12005 v_0 := v.Args[0] 12006 if v_0.Op != OpAMD64ADDQconst { 12007 break 12008 } 12009 c := v_0.AuxInt 12010 ptr := v_0.Args[0] 12011 idx := v.Args[1] 12012 mem := v.Args[2] 12013 v.reset(OpAMD64MOVWstoreconstidx2) 12014 v.AuxInt = ValAndOff(x).add(c) 12015 v.Aux = sym 12016 v.AddArg(ptr) 12017 v.AddArg(idx) 12018 v.AddArg(mem) 12019 return true 12020 } 12021 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 12022 // cond: 12023 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 12024 for { 12025 x := v.AuxInt 12026 sym := v.Aux 12027 _ = v.Args[2] 12028 ptr := v.Args[0] 12029 v_1 := v.Args[1] 12030 if v_1.Op != OpAMD64ADDQconst { 12031 break 12032 } 12033 c := v_1.AuxInt 12034 idx := v_1.Args[0] 12035 mem := v.Args[2] 12036 v.reset(OpAMD64MOVWstoreconstidx2) 12037 v.AuxInt = ValAndOff(x).add(2 * c) 12038 v.Aux = sym 12039 v.AddArg(ptr) 12040 v.AddArg(idx) 12041 v.AddArg(mem) 12042 return true 12043 } 12044 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 12045 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 12046 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 12047 for { 12048 c := v.AuxInt 12049 s := v.Aux 12050 _ = v.Args[2] 12051 p := v.Args[0] 12052 i := v.Args[1] 12053 x := v.Args[2] 12054 if x.Op != OpAMD64MOVWstoreconstidx2 { 12055 break 12056 } 12057 a := x.AuxInt 12058 if x.Aux != s { 12059 break 12060 } 12061 _ = x.Args[2] 12062 if p != x.Args[0] { 12063 break 12064 } 12065 if i != x.Args[1] { 12066 break 12067 } 12068 mem := x.Args[2] 12069 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 12070 break 12071 } 12072 v.reset(OpAMD64MOVLstoreconstidx1) 12073 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 12074 v.Aux = s 12075 v.AddArg(p) 12076 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 12077 v0.AuxInt = 1 12078 v0.AddArg(i) 12079 v.AddArg(v0) 12080 v.AddArg(mem) 12081 return true 12082 } 12083 return false 12084 } 12085 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { 12086 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 12087 // cond: 12088 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 12089 for { 12090 c := v.AuxInt 12091 sym := v.Aux 12092 _ = v.Args[3] 12093 ptr := v.Args[0] 12094 v_1 := v.Args[1] 12095 if v_1.Op != OpAMD64SHLQconst { 12096 break 12097 } 12098 if v_1.AuxInt != 1 { 12099 break 12100 } 12101 idx := v_1.Args[0] 12102 val := v.Args[2] 12103 mem := v.Args[3] 12104 v.reset(OpAMD64MOVWstoreidx2) 12105 v.AuxInt = c 12106 v.Aux = sym 12107 v.AddArg(ptr) 12108 v.AddArg(idx) 12109 v.AddArg(val) 12110 v.AddArg(mem) 12111 return true 12112 } 12113 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 12114 // cond: 12115 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 
12116 for { 12117 c := v.AuxInt 12118 sym := v.Aux 12119 _ = v.Args[3] 12120 v_0 := v.Args[0] 12121 if v_0.Op != OpAMD64ADDQconst { 12122 break 12123 } 12124 d := v_0.AuxInt 12125 ptr := v_0.Args[0] 12126 idx := v.Args[1] 12127 val := v.Args[2] 12128 mem := v.Args[3] 12129 v.reset(OpAMD64MOVWstoreidx1) 12130 v.AuxInt = c + d 12131 v.Aux = sym 12132 v.AddArg(ptr) 12133 v.AddArg(idx) 12134 v.AddArg(val) 12135 v.AddArg(mem) 12136 return true 12137 } 12138 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 12139 // cond: 12140 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 12141 for { 12142 c := v.AuxInt 12143 sym := v.Aux 12144 _ = v.Args[3] 12145 ptr := v.Args[0] 12146 v_1 := v.Args[1] 12147 if v_1.Op != OpAMD64ADDQconst { 12148 break 12149 } 12150 d := v_1.AuxInt 12151 idx := v_1.Args[0] 12152 val := v.Args[2] 12153 mem := v.Args[3] 12154 v.reset(OpAMD64MOVWstoreidx1) 12155 v.AuxInt = c + d 12156 v.Aux = sym 12157 v.AddArg(ptr) 12158 v.AddArg(idx) 12159 v.AddArg(val) 12160 v.AddArg(mem) 12161 return true 12162 } 12163 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 12164 // cond: x.Uses == 1 && clobber(x) 12165 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 12166 for { 12167 i := v.AuxInt 12168 s := v.Aux 12169 _ = v.Args[3] 12170 p := v.Args[0] 12171 idx := v.Args[1] 12172 v_2 := v.Args[2] 12173 if v_2.Op != OpAMD64SHRQconst { 12174 break 12175 } 12176 if v_2.AuxInt != 16 { 12177 break 12178 } 12179 w := v_2.Args[0] 12180 x := v.Args[3] 12181 if x.Op != OpAMD64MOVWstoreidx1 { 12182 break 12183 } 12184 if x.AuxInt != i-2 { 12185 break 12186 } 12187 if x.Aux != s { 12188 break 12189 } 12190 _ = x.Args[3] 12191 if p != x.Args[0] { 12192 break 12193 } 12194 if idx != x.Args[1] { 12195 break 12196 } 12197 if w != x.Args[2] { 12198 break 12199 } 12200 mem := x.Args[3] 12201 if !(x.Uses == 1 && clobber(x)) { 12202 break 12203 } 12204 v.reset(OpAMD64MOVLstoreidx1) 12205 v.AuxInt = i - 2 12206 v.Aux = s 12207 v.AddArg(p) 12208 v.AddArg(idx) 12209 v.AddArg(w) 12210 v.AddArg(mem) 12211 return true 12212 } 12213 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 12214 // cond: x.Uses == 1 && clobber(x) 12215 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 12216 for { 12217 i := v.AuxInt 12218 s := v.Aux 12219 _ = v.Args[3] 12220 p := v.Args[0] 12221 idx := v.Args[1] 12222 v_2 := v.Args[2] 12223 if v_2.Op != OpAMD64SHRQconst { 12224 break 12225 } 12226 j := v_2.AuxInt 12227 w := v_2.Args[0] 12228 x := v.Args[3] 12229 if x.Op != OpAMD64MOVWstoreidx1 { 12230 break 12231 } 12232 if x.AuxInt != i-2 { 12233 break 12234 } 12235 if x.Aux != s { 12236 break 12237 } 12238 _ = x.Args[3] 12239 if p != x.Args[0] { 12240 break 12241 } 12242 if idx != x.Args[1] { 12243 break 12244 } 12245 w0 := x.Args[2] 12246 if w0.Op != OpAMD64SHRQconst { 12247 break 12248 } 12249 if w0.AuxInt != j-16 { 12250 break 12251 } 12252 if w != w0.Args[0] { 12253 break 12254 } 12255 mem := x.Args[3] 12256 if !(x.Uses == 1 && clobber(x)) { 12257 break 12258 } 12259 v.reset(OpAMD64MOVLstoreidx1) 12260 v.AuxInt = i - 2 12261 v.Aux = s 12262 v.AddArg(p) 12263 v.AddArg(idx) 12264 v.AddArg(w0) 12265 v.AddArg(mem) 12266 return true 12267 } 12268 return false 12269 } 12270 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { 12271 b := v.Block 12272 _ = b 12273 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 12274 // cond: 12275 // result: (MOVWstoreidx2 [c+d] {sym} ptr 
idx val mem) 12276 for { 12277 c := v.AuxInt 12278 sym := v.Aux 12279 _ = v.Args[3] 12280 v_0 := v.Args[0] 12281 if v_0.Op != OpAMD64ADDQconst { 12282 break 12283 } 12284 d := v_0.AuxInt 12285 ptr := v_0.Args[0] 12286 idx := v.Args[1] 12287 val := v.Args[2] 12288 mem := v.Args[3] 12289 v.reset(OpAMD64MOVWstoreidx2) 12290 v.AuxInt = c + d 12291 v.Aux = sym 12292 v.AddArg(ptr) 12293 v.AddArg(idx) 12294 v.AddArg(val) 12295 v.AddArg(mem) 12296 return true 12297 } 12298 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 12299 // cond: 12300 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 12301 for { 12302 c := v.AuxInt 12303 sym := v.Aux 12304 _ = v.Args[3] 12305 ptr := v.Args[0] 12306 v_1 := v.Args[1] 12307 if v_1.Op != OpAMD64ADDQconst { 12308 break 12309 } 12310 d := v_1.AuxInt 12311 idx := v_1.Args[0] 12312 val := v.Args[2] 12313 mem := v.Args[3] 12314 v.reset(OpAMD64MOVWstoreidx2) 12315 v.AuxInt = c + 2*d 12316 v.Aux = sym 12317 v.AddArg(ptr) 12318 v.AddArg(idx) 12319 v.AddArg(val) 12320 v.AddArg(mem) 12321 return true 12322 } 12323 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 12324 // cond: x.Uses == 1 && clobber(x) 12325 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 12326 for { 12327 i := v.AuxInt 12328 s := v.Aux 12329 _ = v.Args[3] 12330 p := v.Args[0] 12331 idx := v.Args[1] 12332 v_2 := v.Args[2] 12333 if v_2.Op != OpAMD64SHRQconst { 12334 break 12335 } 12336 if v_2.AuxInt != 16 { 12337 break 12338 } 12339 w := v_2.Args[0] 12340 x := v.Args[3] 12341 if x.Op != OpAMD64MOVWstoreidx2 { 12342 break 12343 } 12344 if x.AuxInt != i-2 { 12345 break 12346 } 12347 if x.Aux != s { 12348 break 12349 } 12350 _ = x.Args[3] 12351 if p != x.Args[0] { 12352 break 12353 } 12354 if idx != x.Args[1] { 12355 break 12356 } 12357 if w != x.Args[2] { 12358 break 12359 } 12360 mem := x.Args[3] 12361 if !(x.Uses == 1 && clobber(x)) { 12362 break 12363 } 12364 v.reset(OpAMD64MOVLstoreidx1) 12365 v.AuxInt = i - 2 12366 v.Aux = s 12367 v.AddArg(p) 12368 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 12369 v0.AuxInt = 1 12370 v0.AddArg(idx) 12371 v.AddArg(v0) 12372 v.AddArg(w) 12373 v.AddArg(mem) 12374 return true 12375 } 12376 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 12377 // cond: x.Uses == 1 && clobber(x) 12378 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 12379 for { 12380 i := v.AuxInt 12381 s := v.Aux 12382 _ = v.Args[3] 12383 p := v.Args[0] 12384 idx := v.Args[1] 12385 v_2 := v.Args[2] 12386 if v_2.Op != OpAMD64SHRQconst { 12387 break 12388 } 12389 j := v_2.AuxInt 12390 w := v_2.Args[0] 12391 x := v.Args[3] 12392 if x.Op != OpAMD64MOVWstoreidx2 { 12393 break 12394 } 12395 if x.AuxInt != i-2 { 12396 break 12397 } 12398 if x.Aux != s { 12399 break 12400 } 12401 _ = x.Args[3] 12402 if p != x.Args[0] { 12403 break 12404 } 12405 if idx != x.Args[1] { 12406 break 12407 } 12408 w0 := x.Args[2] 12409 if w0.Op != OpAMD64SHRQconst { 12410 break 12411 } 12412 if w0.AuxInt != j-16 { 12413 break 12414 } 12415 if w != w0.Args[0] { 12416 break 12417 } 12418 mem := x.Args[3] 12419 if !(x.Uses == 1 && clobber(x)) { 12420 break 12421 } 12422 v.reset(OpAMD64MOVLstoreidx1) 12423 v.AuxInt = i - 2 12424 v.Aux = s 12425 v.AddArg(p) 12426 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 12427 v0.AuxInt = 1 12428 v0.AddArg(idx) 12429 v.AddArg(v0) 12430 v.AddArg(w0) 12431 v.AddArg(mem) 12432 return true 
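// The two rules above merge adjacent 16-bit stores of pieces of w into a
// single 32-bit store at the lower address, relying on little-endian
// layout. Because MOVLstoreidx1 is byte-scaled while MOVWstoreidx2 scales
// its index by 2, the index is first rescaled with a SHLQconst [1].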
func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool {
	// match: (MULL x (MOVLconst [c]))
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULL (MOVLconst [c]) x)
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool {
	// match: (MULLconst [c] (MULLconst [d] x))
	// cond:
	// result: (MULLconst [int64(int32(c * d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int64(int32(c * d))
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c*d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c * d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool {
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(c*d)
	// result: (MULQconst [c * d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c * d)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c * d
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-1] x)
	// cond:
	// result: (NEGQ x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (MULQconst [1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [3] x)
	// cond:
	// result: (LEAQ2 x x)
	for {
		if v.AuxInt != 3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [5] x)
	// cond:
	// result: (LEAQ4 x x)
	for {
		if v.AuxInt != 5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [7] x)
	// cond:
	// result: (LEAQ8 (NEGQ <v.Type> x) x)
	for {
		if v.AuxInt != 7 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [9] x)
	// cond:
	// result: (LEAQ8 x x)
	for {
		if v.AuxInt != 9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [11] x)
	// cond:
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 11 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [13] x)
	// cond:
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 13 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
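// Editor's illustrative sketch (not part of the generated rules above): the
// MULQconst strength-reduction cases exploit the identity LEAQk a b = a + k*b,
// so x*3 = LEAQ2 x x, x*5 = LEAQ4 x x, x*9 = LEAQ8 x x, and composites such as
// x*11 = LEAQ2 x (LEAQ4 x x) = x + 2*(x + 4*x). The closures below model the
// three LEA forms; the function name is hypothetical.
func exampleLEAMultiply(x int64) [4]int64 {
	leaq2 := func(a, b int64) int64 { return a + 2*b } // models (LEAQ2 a b)
	leaq4 := func(a, b int64) int64 { return a + 4*b } // models (LEAQ4 a b)
	leaq8 := func(a, b int64) int64 { return a + 8*b } // models (LEAQ8 a b)
	return [4]int64{
		leaq2(x, x),           // == 3*x
		leaq4(x, x),           // == 5*x
		leaq8(x, x),           // == 9*x
		leaq2(x, leaq4(x, x)), // == 11*x
	}
}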
func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [21] x)
	// cond:
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 21 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [25] x)
	// cond:
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 25 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [37] x)
	// cond:
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 37 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [41] x)
	// cond:
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 41 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [73] x)
	// cond:
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 73 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c)
	// result: (SHLQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c + 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 2)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 4)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 8)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 3)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 5)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 9)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c*d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c * d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool {
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
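// Editor's illustrative sketch (not part of the generated rules above): the
// generic MULQconst fallbacks handle constants at or near a power of two.
// With c = 2^n the multiply becomes one shift; with c = 2^n - 1 (c >= 15) it
// becomes (x<<n) - x; with c divisible by 3, 5, or 9 into a power of two it
// becomes an LEA followed by a shift. The function name is hypothetical.
func exampleShiftMultiply(x int64) [3]int64 {
	return [3]int64{
		x << 4,         // x*16: isPowerOfTwo(16), log2(16) == 4
		(x << 4) - x,   // x*15: isPowerOfTwo(15+1), i.e. SUBQ (SHLQconst [4] x) x
		(x + 2*x) << 2, // x*12: 12%3 == 0 && isPowerOfTwo(4), i.e. SHLQconst [2] (LEAQ2 x x)
	}
}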
func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool {
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
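// Editor's illustrative sketch (not part of the generated rules above): the
// NEGL fold writes its constant as int64(int32(-c)) rather than -c because the
// 32-bit result must wrap and then be sign-extended back into the 64-bit
// AuxInt. The boundary case is MinInt32, whose negation wraps to itself. The
// function name is hypothetical.
func exampleNEGLFold() bool {
	c := int64(-2147483648)       // math.MinInt32, as a MOVLconst AuxInt
	folded := int64(int32(-c))    // what (NEGL (MOVLconst [c])) folds to
	return folded == c && -c != c // -MinInt32 wraps back to MinInt32 in 32 bits
}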
func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
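// Editor's illustrative sketch (not part of the generated rules above): the
// ROLLconst/ROLWconst/ROLBconst rules recognize the standard constant-rotate
// idiom. For a 32-bit value, (x << c) | (x >> (32-c)) with 0 < c < 32 is a
// rotate left by c, which is exactly what a single ROLL instruction computes.
// The function name is hypothetical.
func exampleRotateIdiom(x uint32, c uint) uint32 {
	// Matches (ORL (SHLLconst x [c]) (SHRLconst x [32-c])); valid for 0 < c < 32.
	return x<<c | x>>(32-c)
}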
func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
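// Editor's illustrative sketch (not part of the generated rules above): the
// long ROLL/RORL patterns match the branch-free expansion of a variable
// rotate. With s = y&31 the rotate is x<<s | x>>(32-s), but x86 masks 32-bit
// shift counts to 5 bits, so when s == 0 the right-shift term would wrongly
// contribute x>>0; SBBLcarrymask materializes an all-ones-or-zero mask that
// zeroes out that term. The rewrite collapses the whole tree to one ROLL.
// The function name is hypothetical.
func exampleVariableRotate(x uint32, y uint) uint32 {
	s := y & 31
	var mask uint32
	if 32-s < 32 { // i.e. s != 0; models (SBBLcarrymask (CMPQconst [32] ...))
		mask = ^uint32(0)
	}
	// (32-s)&31 models the hardware-masked shift count of (SHRL x (NEGQ y)).
	return x<<s | (x>>((32-s)&31))&mask
}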
func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
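// Editor's illustrative sketch (not part of the generated rules above): the
// ROLW/RORW patterns are the 16-bit analogue of the variable-rotate match,
// with the shift amount masked to y&15 and the extra condition
// v.Type.Size() == 2 confirming the value really is a 16-bit word. The
// function name is hypothetical.
func exampleRotate16(x uint16, y uint) uint16 {
	s := y & 15
	return x<<s | x>>((16-s)&15) // collapses to a single ROLW on AMD64
}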
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
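		// Note: NEGQ (ADDQconst [-16] (ANDQconst [15] y)) computes 16-(y&15),
		// the complementary shift width of a 16-bit rotate by y&15.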
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
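		// Walk down to the innermost ANDLconst so the rotate count y can be
		// checked against every other occurrence of it in the pattern.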
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
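		// Emit the merged word-sized indexed load in the block chosen by
		// mergePoint, where both original byte loads are available.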
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
i1 := x1.AuxInt 17762 s := x1.Aux 17763 _ = x1.Args[2] 17764 p := x1.Args[0] 17765 idx := x1.Args[1] 17766 mem := x1.Args[2] 17767 or := v.Args[1] 17768 if or.Op != OpAMD64ORL { 17769 break 17770 } 17771 _ = or.Args[1] 17772 s0 := or.Args[0] 17773 if s0.Op != OpAMD64SHLLconst { 17774 break 17775 } 17776 j0 := s0.AuxInt 17777 x0 := s0.Args[0] 17778 if x0.Op != OpAMD64MOVBloadidx1 { 17779 break 17780 } 17781 i0 := x0.AuxInt 17782 if x0.Aux != s { 17783 break 17784 } 17785 _ = x0.Args[2] 17786 if p != x0.Args[0] { 17787 break 17788 } 17789 if idx != x0.Args[1] { 17790 break 17791 } 17792 if mem != x0.Args[2] { 17793 break 17794 } 17795 y := or.Args[1] 17796 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17797 break 17798 } 17799 b = mergePoint(b, x0, x1) 17800 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17801 v.reset(OpCopy) 17802 v.AddArg(v0) 17803 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17804 v1.AuxInt = j0 17805 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 17806 v2.AuxInt = i0 17807 v2.Aux = s 17808 v2.AddArg(p) 17809 v2.AddArg(idx) 17810 v2.AddArg(mem) 17811 v1.AddArg(v2) 17812 v0.AddArg(v1) 17813 v0.AddArg(y) 17814 return true 17815 } 17816 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 17817 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17818 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 17819 for { 17820 _ = v.Args[1] 17821 s1 := v.Args[0] 17822 if s1.Op != OpAMD64SHLLconst { 17823 break 17824 } 17825 j1 := s1.AuxInt 17826 x1 := s1.Args[0] 17827 if x1.Op != OpAMD64MOVBloadidx1 { 17828 break 17829 } 17830 i1 := x1.AuxInt 17831 s := x1.Aux 17832 _ = x1.Args[2] 17833 idx := x1.Args[0] 17834 p := x1.Args[1] 17835 mem := x1.Args[2] 17836 or := v.Args[1] 17837 if or.Op != OpAMD64ORL { 17838 break 17839 } 17840 _ = or.Args[1] 17841 s0 := or.Args[0] 17842 if s0.Op != OpAMD64SHLLconst { 17843 break 17844 } 17845 j0 := s0.AuxInt 17846 x0 := s0.Args[0] 17847 if x0.Op != OpAMD64MOVBloadidx1 { 17848 break 17849 } 17850 i0 := x0.AuxInt 17851 if x0.Aux != s { 17852 break 17853 } 17854 _ = x0.Args[2] 17855 if p != x0.Args[0] { 17856 break 17857 } 17858 if idx != x0.Args[1] { 17859 break 17860 } 17861 if mem != x0.Args[2] { 17862 break 17863 } 17864 y := or.Args[1] 17865 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17866 break 17867 } 17868 b = mergePoint(b, x0, x1) 17869 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17870 v.reset(OpCopy) 17871 v.AddArg(v0) 17872 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17873 v1.AuxInt = j0 17874 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 17875 v2.AuxInt = i0 17876 v2.Aux = s 17877 v2.AddArg(p) 17878 v2.AddArg(idx) 17879 v2.AddArg(mem) 17880 v1.AddArg(v2) 17881 v0.AddArg(v1) 17882 v0.AddArg(y) 17883 return true 17884 } 17885 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL 
s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 17886 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17887 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 17888 for { 17889 _ = v.Args[1] 17890 s1 := v.Args[0] 17891 if s1.Op != OpAMD64SHLLconst { 17892 break 17893 } 17894 j1 := s1.AuxInt 17895 x1 := s1.Args[0] 17896 if x1.Op != OpAMD64MOVBloadidx1 { 17897 break 17898 } 17899 i1 := x1.AuxInt 17900 s := x1.Aux 17901 _ = x1.Args[2] 17902 p := x1.Args[0] 17903 idx := x1.Args[1] 17904 mem := x1.Args[2] 17905 or := v.Args[1] 17906 if or.Op != OpAMD64ORL { 17907 break 17908 } 17909 _ = or.Args[1] 17910 s0 := or.Args[0] 17911 if s0.Op != OpAMD64SHLLconst { 17912 break 17913 } 17914 j0 := s0.AuxInt 17915 x0 := s0.Args[0] 17916 if x0.Op != OpAMD64MOVBloadidx1 { 17917 break 17918 } 17919 i0 := x0.AuxInt 17920 if x0.Aux != s { 17921 break 17922 } 17923 _ = x0.Args[2] 17924 if idx != x0.Args[0] { 17925 break 17926 } 17927 if p != x0.Args[1] { 17928 break 17929 } 17930 if mem != x0.Args[2] { 17931 break 17932 } 17933 y := or.Args[1] 17934 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17935 break 17936 } 17937 b = mergePoint(b, x0, x1) 17938 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17939 v.reset(OpCopy) 17940 v.AddArg(v0) 17941 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17942 v1.AuxInt = j0 17943 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 17944 v2.AuxInt = i0 17945 v2.Aux = s 17946 v2.AddArg(p) 17947 v2.AddArg(idx) 17948 v2.AddArg(mem) 17949 v1.AddArg(v2) 17950 v0.AddArg(v1) 17951 v0.AddArg(y) 17952 return true 17953 } 17954 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 17955 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17956 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 17957 for { 17958 _ = v.Args[1] 17959 s1 := v.Args[0] 17960 if s1.Op != OpAMD64SHLLconst { 17961 break 17962 } 17963 j1 := s1.AuxInt 17964 x1 := s1.Args[0] 17965 if x1.Op != OpAMD64MOVBloadidx1 { 17966 break 17967 } 17968 i1 := x1.AuxInt 17969 s := x1.Aux 17970 _ = x1.Args[2] 17971 idx := x1.Args[0] 17972 p := x1.Args[1] 17973 mem := x1.Args[2] 17974 or := v.Args[1] 17975 if or.Op != OpAMD64ORL { 17976 break 17977 } 17978 _ = or.Args[1] 17979 s0 := or.Args[0] 17980 if s0.Op != OpAMD64SHLLconst { 17981 break 17982 } 17983 j0 := s0.AuxInt 17984 x0 := s0.Args[0] 17985 if x0.Op != OpAMD64MOVBloadidx1 { 17986 break 17987 } 17988 i0 := x0.AuxInt 17989 if x0.Aux != s { 17990 break 17991 } 17992 _ = x0.Args[2] 17993 if idx != x0.Args[0] { 17994 break 17995 } 17996 if p != x0.Args[1] { 17997 break 17998 } 17999 if mem != x0.Args[2] { 18000 break 18001 } 18002 y := or.Args[1] 18003 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18004 break 18005 } 18006 b = mergePoint(b, x0, x1) 18007 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18008 v.reset(OpCopy) 18009 v.AddArg(v0) 18010 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18011 v1.AuxInt = j0 18012 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18013 v2.AuxInt = i0 18014 v2.Aux = s 18015 v2.AddArg(p) 18016 v2.AddArg(idx) 18017 v2.AddArg(mem) 18018 v1.AddArg(v2) 18019 v0.AddArg(v1) 18020 v0.AddArg(y) 18021 return true 18022 } 18023 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 18024 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18025 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18026 for { 18027 _ = v.Args[1] 18028 s1 := v.Args[0] 18029 if s1.Op != OpAMD64SHLLconst { 18030 break 18031 } 18032 j1 := s1.AuxInt 18033 x1 := s1.Args[0] 18034 if x1.Op != OpAMD64MOVBloadidx1 { 18035 break 18036 } 18037 i1 := x1.AuxInt 18038 s := x1.Aux 18039 _ = x1.Args[2] 18040 p := x1.Args[0] 18041 idx := x1.Args[1] 18042 mem := x1.Args[2] 18043 or := v.Args[1] 18044 if or.Op != OpAMD64ORL { 18045 break 18046 } 18047 _ = or.Args[1] 18048 y := or.Args[0] 18049 s0 := or.Args[1] 18050 if s0.Op != OpAMD64SHLLconst { 18051 break 18052 } 18053 j0 := s0.AuxInt 18054 x0 := s0.Args[0] 18055 if x0.Op != OpAMD64MOVBloadidx1 { 18056 break 18057 } 18058 i0 := x0.AuxInt 18059 if x0.Aux != s { 18060 break 18061 } 18062 _ = x0.Args[2] 18063 if p != x0.Args[0] { 18064 break 18065 } 18066 if idx != x0.Args[1] { 18067 break 18068 } 18069 if mem != x0.Args[2] { 18070 break 18071 } 18072 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18073 break 18074 } 18075 b = mergePoint(b, x0, x1) 18076 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18077 v.reset(OpCopy) 18078 v.AddArg(v0) 18079 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18080 v1.AuxInt = j0 18081 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18082 v2.AuxInt = i0 18083 v2.Aux = s 18084 v2.AddArg(p) 18085 v2.AddArg(idx) 18086 v2.AddArg(mem) 18087 v1.AddArg(v2) 18088 v0.AddArg(v1) 18089 v0.AddArg(y) 18090 return true 18091 } 18092 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 18093 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18094 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18095 for { 18096 _ = v.Args[1] 18097 s1 := v.Args[0] 18098 if s1.Op != OpAMD64SHLLconst { 18099 break 18100 } 18101 j1 := s1.AuxInt 18102 x1 := s1.Args[0] 18103 if x1.Op != OpAMD64MOVBloadidx1 { 18104 break 18105 } 18106 i1 := x1.AuxInt 18107 s := x1.Aux 18108 _ = x1.Args[2] 18109 idx := x1.Args[0] 18110 p := x1.Args[1] 18111 mem := x1.Args[2] 18112 or := v.Args[1] 18113 if or.Op != OpAMD64ORL { 18114 break 
18115 } 18116 _ = or.Args[1] 18117 y := or.Args[0] 18118 s0 := or.Args[1] 18119 if s0.Op != OpAMD64SHLLconst { 18120 break 18121 } 18122 j0 := s0.AuxInt 18123 x0 := s0.Args[0] 18124 if x0.Op != OpAMD64MOVBloadidx1 { 18125 break 18126 } 18127 i0 := x0.AuxInt 18128 if x0.Aux != s { 18129 break 18130 } 18131 _ = x0.Args[2] 18132 if p != x0.Args[0] { 18133 break 18134 } 18135 if idx != x0.Args[1] { 18136 break 18137 } 18138 if mem != x0.Args[2] { 18139 break 18140 } 18141 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18142 break 18143 } 18144 b = mergePoint(b, x0, x1) 18145 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18146 v.reset(OpCopy) 18147 v.AddArg(v0) 18148 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18149 v1.AuxInt = j0 18150 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18151 v2.AuxInt = i0 18152 v2.Aux = s 18153 v2.AddArg(p) 18154 v2.AddArg(idx) 18155 v2.AddArg(mem) 18156 v1.AddArg(v2) 18157 v0.AddArg(v1) 18158 v0.AddArg(y) 18159 return true 18160 } 18161 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 18162 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18163 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18164 for { 18165 _ = v.Args[1] 18166 s1 := v.Args[0] 18167 if s1.Op != OpAMD64SHLLconst { 18168 break 18169 } 18170 j1 := s1.AuxInt 18171 x1 := s1.Args[0] 18172 if x1.Op != OpAMD64MOVBloadidx1 { 18173 break 18174 } 18175 i1 := x1.AuxInt 18176 s := x1.Aux 18177 _ = x1.Args[2] 18178 p := x1.Args[0] 18179 idx := x1.Args[1] 18180 mem := x1.Args[2] 18181 or := v.Args[1] 18182 if or.Op != OpAMD64ORL { 18183 break 18184 } 18185 _ = or.Args[1] 18186 y := or.Args[0] 18187 s0 := or.Args[1] 18188 if s0.Op != OpAMD64SHLLconst { 18189 break 18190 } 18191 j0 := s0.AuxInt 18192 x0 := s0.Args[0] 18193 if x0.Op != OpAMD64MOVBloadidx1 { 18194 break 18195 } 18196 i0 := x0.AuxInt 18197 if x0.Aux != s { 18198 break 18199 } 18200 _ = x0.Args[2] 18201 if idx != x0.Args[0] { 18202 break 18203 } 18204 if p != x0.Args[1] { 18205 break 18206 } 18207 if mem != x0.Args[2] { 18208 break 18209 } 18210 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18211 break 18212 } 18213 b = mergePoint(b, x0, x1) 18214 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18215 v.reset(OpCopy) 18216 v.AddArg(v0) 18217 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18218 v1.AuxInt = j0 18219 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18220 v2.AuxInt = i0 18221 v2.Aux = s 18222 v2.AddArg(p) 18223 v2.AddArg(idx) 18224 v2.AddArg(mem) 18225 v1.AddArg(v2) 18226 v0.AddArg(v1) 18227 v0.AddArg(y) 18228 return true 18229 } 18230 return false 18231 } 18232 func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool { 18233 b := v.Block 18234 _ = b 18235 typ := &b.Func.Config.Types 18236 _ = typ 18237 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] 
x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 18238 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18239 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18240 for { 18241 _ = v.Args[1] 18242 s1 := v.Args[0] 18243 if s1.Op != OpAMD64SHLLconst { 18244 break 18245 } 18246 j1 := s1.AuxInt 18247 x1 := s1.Args[0] 18248 if x1.Op != OpAMD64MOVBloadidx1 { 18249 break 18250 } 18251 i1 := x1.AuxInt 18252 s := x1.Aux 18253 _ = x1.Args[2] 18254 idx := x1.Args[0] 18255 p := x1.Args[1] 18256 mem := x1.Args[2] 18257 or := v.Args[1] 18258 if or.Op != OpAMD64ORL { 18259 break 18260 } 18261 _ = or.Args[1] 18262 y := or.Args[0] 18263 s0 := or.Args[1] 18264 if s0.Op != OpAMD64SHLLconst { 18265 break 18266 } 18267 j0 := s0.AuxInt 18268 x0 := s0.Args[0] 18269 if x0.Op != OpAMD64MOVBloadidx1 { 18270 break 18271 } 18272 i0 := x0.AuxInt 18273 if x0.Aux != s { 18274 break 18275 } 18276 _ = x0.Args[2] 18277 if idx != x0.Args[0] { 18278 break 18279 } 18280 if p != x0.Args[1] { 18281 break 18282 } 18283 if mem != x0.Args[2] { 18284 break 18285 } 18286 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18287 break 18288 } 18289 b = mergePoint(b, x0, x1) 18290 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18291 v.reset(OpCopy) 18292 v.AddArg(v0) 18293 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18294 v1.AuxInt = j0 18295 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18296 v2.AuxInt = i0 18297 v2.Aux = s 18298 v2.AddArg(p) 18299 v2.AddArg(idx) 18300 v2.AddArg(mem) 18301 v1.AddArg(v2) 18302 v0.AddArg(v1) 18303 v0.AddArg(y) 18304 return true 18305 } 18306 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18307 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18308 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18309 for { 18310 _ = v.Args[1] 18311 or := v.Args[0] 18312 if or.Op != OpAMD64ORL { 18313 break 18314 } 18315 _ = or.Args[1] 18316 s0 := or.Args[0] 18317 if s0.Op != OpAMD64SHLLconst { 18318 break 18319 } 18320 j0 := s0.AuxInt 18321 x0 := s0.Args[0] 18322 if x0.Op != OpAMD64MOVBloadidx1 { 18323 break 18324 } 18325 i0 := x0.AuxInt 18326 s := x0.Aux 18327 _ = x0.Args[2] 18328 p := x0.Args[0] 18329 idx := x0.Args[1] 18330 mem := x0.Args[2] 18331 y := or.Args[1] 18332 s1 := v.Args[1] 18333 if s1.Op != OpAMD64SHLLconst { 18334 break 18335 } 18336 j1 := s1.AuxInt 18337 x1 := s1.Args[0] 18338 if x1.Op != OpAMD64MOVBloadidx1 { 18339 break 18340 } 18341 i1 := x1.AuxInt 18342 if x1.Aux != s { 18343 break 18344 } 18345 _ = x1.Args[2] 18346 if p != x1.Args[0] { 18347 break 18348 } 18349 if idx != x1.Args[1] { 18350 break 18351 } 18352 if mem != x1.Args[2] { 18353 break 18354 } 18355 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18356 break 18357 } 18358 b = mergePoint(b, x0, x1) 18359 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18360 v.reset(OpCopy) 18361 v.AddArg(v0) 18362 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18363 v1.AuxInt = j0 18364 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18365 v2.AuxInt = i0 18366 v2.Aux = s 18367 v2.AddArg(p) 18368 v2.AddArg(idx) 18369 v2.AddArg(mem) 18370 v1.AddArg(v2) 18371 v0.AddArg(v1) 18372 v0.AddArg(y) 18373 return true 18374 } 18375 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18376 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18377 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18378 for { 18379 _ = v.Args[1] 18380 or := v.Args[0] 18381 if or.Op != OpAMD64ORL { 18382 break 18383 } 18384 _ = or.Args[1] 18385 s0 := or.Args[0] 18386 if s0.Op != OpAMD64SHLLconst { 18387 break 18388 } 18389 j0 := s0.AuxInt 18390 x0 := s0.Args[0] 18391 if x0.Op != OpAMD64MOVBloadidx1 { 18392 break 18393 } 18394 i0 := x0.AuxInt 18395 s := x0.Aux 18396 _ = x0.Args[2] 18397 idx := x0.Args[0] 18398 p := x0.Args[1] 18399 mem := x0.Args[2] 18400 y := or.Args[1] 18401 s1 := v.Args[1] 18402 if s1.Op != OpAMD64SHLLconst { 18403 break 18404 } 18405 j1 := s1.AuxInt 18406 x1 := s1.Args[0] 18407 if x1.Op != OpAMD64MOVBloadidx1 { 18408 break 18409 } 18410 i1 := x1.AuxInt 18411 if x1.Aux != s { 18412 break 18413 } 18414 _ = x1.Args[2] 18415 if p != x1.Args[0] { 18416 break 18417 } 18418 if idx != x1.Args[1] { 18419 break 18420 } 18421 if mem != x1.Args[2] { 18422 break 18423 } 18424 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18425 break 18426 } 18427 b = mergePoint(b, x0, x1) 18428 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18429 v.reset(OpCopy) 18430 v.AddArg(v0) 18431 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18432 v1.AuxInt = j0 18433 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18434 v2.AuxInt = i0 18435 v2.Aux = s 18436 v2.AddArg(p) 18437 v2.AddArg(idx) 18438 v2.AddArg(mem) 18439 v1.AddArg(v2) 18440 v0.AddArg(v1) 18441 v0.AddArg(y) 18442 return true 18443 } 18444 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18445 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18446 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18447 for { 18448 _ = v.Args[1] 18449 or := v.Args[0] 18450 if or.Op != OpAMD64ORL { 18451 break 18452 } 18453 _ = or.Args[1] 18454 y := or.Args[0] 18455 s0 := or.Args[1] 18456 if s0.Op != OpAMD64SHLLconst { 18457 break 18458 } 18459 j0 := s0.AuxInt 18460 x0 := s0.Args[0] 18461 if x0.Op != OpAMD64MOVBloadidx1 { 18462 break 18463 } 18464 i0 := x0.AuxInt 18465 s := x0.Aux 18466 _ = x0.Args[2] 18467 p := x0.Args[0] 18468 idx := 
x0.Args[1] 18469 mem := x0.Args[2] 18470 s1 := v.Args[1] 18471 if s1.Op != OpAMD64SHLLconst { 18472 break 18473 } 18474 j1 := s1.AuxInt 18475 x1 := s1.Args[0] 18476 if x1.Op != OpAMD64MOVBloadidx1 { 18477 break 18478 } 18479 i1 := x1.AuxInt 18480 if x1.Aux != s { 18481 break 18482 } 18483 _ = x1.Args[2] 18484 if p != x1.Args[0] { 18485 break 18486 } 18487 if idx != x1.Args[1] { 18488 break 18489 } 18490 if mem != x1.Args[2] { 18491 break 18492 } 18493 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18494 break 18495 } 18496 b = mergePoint(b, x0, x1) 18497 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18498 v.reset(OpCopy) 18499 v.AddArg(v0) 18500 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18501 v1.AuxInt = j0 18502 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18503 v2.AuxInt = i0 18504 v2.Aux = s 18505 v2.AddArg(p) 18506 v2.AddArg(idx) 18507 v2.AddArg(mem) 18508 v1.AddArg(v2) 18509 v0.AddArg(v1) 18510 v0.AddArg(y) 18511 return true 18512 } 18513 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18514 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18515 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18516 for { 18517 _ = v.Args[1] 18518 or := v.Args[0] 18519 if or.Op != OpAMD64ORL { 18520 break 18521 } 18522 _ = or.Args[1] 18523 y := or.Args[0] 18524 s0 := or.Args[1] 18525 if s0.Op != OpAMD64SHLLconst { 18526 break 18527 } 18528 j0 := s0.AuxInt 18529 x0 := s0.Args[0] 18530 if x0.Op != OpAMD64MOVBloadidx1 { 18531 break 18532 } 18533 i0 := x0.AuxInt 18534 s := x0.Aux 18535 _ = x0.Args[2] 18536 idx := x0.Args[0] 18537 p := x0.Args[1] 18538 mem := x0.Args[2] 18539 s1 := v.Args[1] 18540 if s1.Op != OpAMD64SHLLconst { 18541 break 18542 } 18543 j1 := s1.AuxInt 18544 x1 := s1.Args[0] 18545 if x1.Op != OpAMD64MOVBloadidx1 { 18546 break 18547 } 18548 i1 := x1.AuxInt 18549 if x1.Aux != s { 18550 break 18551 } 18552 _ = x1.Args[2] 18553 if p != x1.Args[0] { 18554 break 18555 } 18556 if idx != x1.Args[1] { 18557 break 18558 } 18559 if mem != x1.Args[2] { 18560 break 18561 } 18562 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18563 break 18564 } 18565 b = mergePoint(b, x0, x1) 18566 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18567 v.reset(OpCopy) 18568 v.AddArg(v0) 18569 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18570 v1.AuxInt = j0 18571 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18572 v2.AuxInt = i0 18573 v2.Aux = s 18574 v2.AddArg(p) 18575 v2.AddArg(idx) 18576 v2.AddArg(mem) 18577 v1.AddArg(v2) 18578 v0.AddArg(v1) 18579 v0.AddArg(y) 18580 return true 18581 } 18582 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18583 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18584 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18585 for { 18586 _ = v.Args[1] 18587 or := v.Args[0] 18588 if or.Op != OpAMD64ORL { 18589 break 18590 } 18591 _ = or.Args[1] 18592 s0 := or.Args[0] 18593 if s0.Op != OpAMD64SHLLconst { 18594 break 18595 } 18596 j0 := s0.AuxInt 18597 x0 := s0.Args[0] 18598 if x0.Op != OpAMD64MOVBloadidx1 { 18599 break 18600 } 18601 i0 := x0.AuxInt 18602 s := x0.Aux 18603 _ = x0.Args[2] 18604 p := x0.Args[0] 18605 idx := x0.Args[1] 18606 mem := x0.Args[2] 18607 y := or.Args[1] 18608 s1 := v.Args[1] 18609 if s1.Op != OpAMD64SHLLconst { 18610 break 18611 } 18612 j1 := s1.AuxInt 18613 x1 := s1.Args[0] 18614 if x1.Op != OpAMD64MOVBloadidx1 { 18615 break 18616 } 18617 i1 := x1.AuxInt 18618 if x1.Aux != s { 18619 break 18620 } 18621 _ = x1.Args[2] 18622 if idx != x1.Args[0] { 18623 break 18624 } 18625 if p != x1.Args[1] { 18626 break 18627 } 18628 if mem != x1.Args[2] { 18629 break 18630 } 18631 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18632 break 18633 } 18634 b = mergePoint(b, x0, x1) 18635 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18636 v.reset(OpCopy) 18637 v.AddArg(v0) 18638 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18639 v1.AuxInt = j0 18640 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18641 v2.AuxInt = i0 18642 v2.Aux = s 18643 v2.AddArg(p) 18644 v2.AddArg(idx) 18645 v2.AddArg(mem) 18646 v1.AddArg(v2) 18647 v0.AddArg(v1) 18648 v0.AddArg(y) 18649 return true 18650 } 18651 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18652 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18653 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18654 for { 18655 _ = v.Args[1] 18656 or := v.Args[0] 18657 if or.Op != OpAMD64ORL { 18658 break 18659 } 18660 _ = or.Args[1] 18661 s0 := or.Args[0] 18662 if s0.Op != OpAMD64SHLLconst { 18663 break 18664 } 18665 j0 := s0.AuxInt 18666 x0 := s0.Args[0] 18667 if x0.Op != OpAMD64MOVBloadidx1 { 18668 break 18669 } 18670 i0 := x0.AuxInt 18671 s := x0.Aux 18672 _ = x0.Args[2] 18673 idx := x0.Args[0] 18674 p := x0.Args[1] 18675 mem := x0.Args[2] 18676 y := or.Args[1] 18677 s1 := v.Args[1] 18678 if s1.Op != OpAMD64SHLLconst { 18679 break 18680 } 18681 j1 := s1.AuxInt 18682 x1 := s1.Args[0] 18683 if x1.Op != OpAMD64MOVBloadidx1 { 18684 break 18685 } 18686 i1 := x1.AuxInt 18687 if x1.Aux != s { 18688 break 18689 } 18690 _ = x1.Args[2] 18691 if idx != x1.Args[0] { 18692 break 18693 } 18694 if p != x1.Args[1] { 18695 break 18696 } 18697 if mem != x1.Args[2] { 18698 break 18699 } 18700 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18701 break 18702 } 18703 b = mergePoint(b, x0, x1) 18704 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18705 v.reset(OpCopy) 
18706 v.AddArg(v0) 18707 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18708 v1.AuxInt = j0 18709 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18710 v2.AuxInt = i0 18711 v2.Aux = s 18712 v2.AddArg(p) 18713 v2.AddArg(idx) 18714 v2.AddArg(mem) 18715 v1.AddArg(v2) 18716 v0.AddArg(v1) 18717 v0.AddArg(y) 18718 return true 18719 } 18720 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18721 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18722 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18723 for { 18724 _ = v.Args[1] 18725 or := v.Args[0] 18726 if or.Op != OpAMD64ORL { 18727 break 18728 } 18729 _ = or.Args[1] 18730 y := or.Args[0] 18731 s0 := or.Args[1] 18732 if s0.Op != OpAMD64SHLLconst { 18733 break 18734 } 18735 j0 := s0.AuxInt 18736 x0 := s0.Args[0] 18737 if x0.Op != OpAMD64MOVBloadidx1 { 18738 break 18739 } 18740 i0 := x0.AuxInt 18741 s := x0.Aux 18742 _ = x0.Args[2] 18743 p := x0.Args[0] 18744 idx := x0.Args[1] 18745 mem := x0.Args[2] 18746 s1 := v.Args[1] 18747 if s1.Op != OpAMD64SHLLconst { 18748 break 18749 } 18750 j1 := s1.AuxInt 18751 x1 := s1.Args[0] 18752 if x1.Op != OpAMD64MOVBloadidx1 { 18753 break 18754 } 18755 i1 := x1.AuxInt 18756 if x1.Aux != s { 18757 break 18758 } 18759 _ = x1.Args[2] 18760 if idx != x1.Args[0] { 18761 break 18762 } 18763 if p != x1.Args[1] { 18764 break 18765 } 18766 if mem != x1.Args[2] { 18767 break 18768 } 18769 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18770 break 18771 } 18772 b = mergePoint(b, x0, x1) 18773 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18774 v.reset(OpCopy) 18775 v.AddArg(v0) 18776 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18777 v1.AuxInt = j0 18778 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18779 v2.AuxInt = i0 18780 v2.Aux = s 18781 v2.AddArg(p) 18782 v2.AddArg(idx) 18783 v2.AddArg(mem) 18784 v1.AddArg(v2) 18785 v0.AddArg(v1) 18786 v0.AddArg(y) 18787 return true 18788 } 18789 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18790 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18791 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18792 for { 18793 _ = v.Args[1] 18794 or := v.Args[0] 18795 if or.Op != OpAMD64ORL { 18796 break 18797 } 18798 _ = or.Args[1] 18799 y := or.Args[0] 18800 s0 := or.Args[1] 18801 if s0.Op != OpAMD64SHLLconst { 18802 break 18803 } 18804 j0 := s0.AuxInt 18805 x0 := s0.Args[0] 18806 if x0.Op != OpAMD64MOVBloadidx1 { 18807 break 18808 } 18809 i0 := x0.AuxInt 18810 s := x0.Aux 18811 _ = x0.Args[2] 18812 idx := x0.Args[0] 18813 p := x0.Args[1] 18814 mem := x0.Args[2] 18815 s1 := v.Args[1] 18816 if s1.Op != OpAMD64SHLLconst { 18817 break 18818 } 18819 j1 := s1.AuxInt 18820 x1 := s1.Args[0] 18821 if x1.Op != 
OpAMD64MOVBloadidx1 { 18822 break 18823 } 18824 i1 := x1.AuxInt 18825 if x1.Aux != s { 18826 break 18827 } 18828 _ = x1.Args[2] 18829 if idx != x1.Args[0] { 18830 break 18831 } 18832 if p != x1.Args[1] { 18833 break 18834 } 18835 if mem != x1.Args[2] { 18836 break 18837 } 18838 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18839 break 18840 } 18841 b = mergePoint(b, x0, x1) 18842 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18843 v.reset(OpCopy) 18844 v.AddArg(v0) 18845 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18846 v1.AuxInt = j0 18847 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18848 v2.AuxInt = i0 18849 v2.Aux = s 18850 v2.AddArg(p) 18851 v2.AddArg(idx) 18852 v2.AddArg(mem) 18853 v1.AddArg(v2) 18854 v0.AddArg(v1) 18855 v0.AddArg(y) 18856 return true 18857 } 18858 // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) 18859 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18860 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 18861 for { 18862 _ = v.Args[1] 18863 x1 := v.Args[0] 18864 if x1.Op != OpAMD64MOVBload { 18865 break 18866 } 18867 i1 := x1.AuxInt 18868 s := x1.Aux 18869 _ = x1.Args[1] 18870 p := x1.Args[0] 18871 mem := x1.Args[1] 18872 sh := v.Args[1] 18873 if sh.Op != OpAMD64SHLLconst { 18874 break 18875 } 18876 if sh.AuxInt != 8 { 18877 break 18878 } 18879 x0 := sh.Args[0] 18880 if x0.Op != OpAMD64MOVBload { 18881 break 18882 } 18883 i0 := x0.AuxInt 18884 if x0.Aux != s { 18885 break 18886 } 18887 _ = x0.Args[1] 18888 if p != x0.Args[0] { 18889 break 18890 } 18891 if mem != x0.Args[1] { 18892 break 18893 } 18894 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18895 break 18896 } 18897 b = mergePoint(b, x0, x1) 18898 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 18899 v.reset(OpCopy) 18900 v.AddArg(v0) 18901 v0.AuxInt = 8 18902 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18903 v1.AuxInt = i0 18904 v1.Aux = s 18905 v1.AddArg(p) 18906 v1.AddArg(mem) 18907 v0.AddArg(v1) 18908 return true 18909 } 18910 return false 18911 } 18912 func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { 18913 b := v.Block 18914 _ = b 18915 typ := &b.Func.Config.Types 18916 _ = typ 18917 // match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 18918 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18919 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 18920 for { 18921 _ = v.Args[1] 18922 sh := v.Args[0] 18923 if sh.Op != OpAMD64SHLLconst { 18924 break 18925 } 18926 if sh.AuxInt != 8 { 18927 break 18928 } 18929 x0 := sh.Args[0] 18930 if x0.Op != OpAMD64MOVBload { 18931 break 18932 } 18933 i0 := x0.AuxInt 18934 s := x0.Aux 18935 _ = x0.Args[1] 18936 p := x0.Args[0] 18937 mem := x0.Args[1] 18938 x1 := v.Args[1] 18939 if x1.Op != OpAMD64MOVBload { 18940 break 18941 } 18942 i1 := x1.AuxInt 18943 if x1.Aux != s { 18944 break 18945 } 18946 _ = x1.Args[1] 18947 if p != x1.Args[0] { 18948 break 18949 } 18950 if mem != x1.Args[1] { 18951 break 18952 } 18953 if !(i1 == i0+1 
&& x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18954 break 18955 } 18956 b = mergePoint(b, x0, x1) 18957 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 18958 v.reset(OpCopy) 18959 v.AddArg(v0) 18960 v0.AuxInt = 8 18961 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18962 v1.AuxInt = i0 18963 v1.Aux = s 18964 v1.AddArg(p) 18965 v1.AddArg(mem) 18966 v0.AddArg(v1) 18967 return true 18968 } 18969 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 18970 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 18971 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 18972 for { 18973 _ = v.Args[1] 18974 r1 := v.Args[0] 18975 if r1.Op != OpAMD64ROLWconst { 18976 break 18977 } 18978 if r1.AuxInt != 8 { 18979 break 18980 } 18981 x1 := r1.Args[0] 18982 if x1.Op != OpAMD64MOVWload { 18983 break 18984 } 18985 i1 := x1.AuxInt 18986 s := x1.Aux 18987 _ = x1.Args[1] 18988 p := x1.Args[0] 18989 mem := x1.Args[1] 18990 sh := v.Args[1] 18991 if sh.Op != OpAMD64SHLLconst { 18992 break 18993 } 18994 if sh.AuxInt != 16 { 18995 break 18996 } 18997 r0 := sh.Args[0] 18998 if r0.Op != OpAMD64ROLWconst { 18999 break 19000 } 19001 if r0.AuxInt != 8 { 19002 break 19003 } 19004 x0 := r0.Args[0] 19005 if x0.Op != OpAMD64MOVWload { 19006 break 19007 } 19008 i0 := x0.AuxInt 19009 if x0.Aux != s { 19010 break 19011 } 19012 _ = x0.Args[1] 19013 if p != x0.Args[0] { 19014 break 19015 } 19016 if mem != x0.Args[1] { 19017 break 19018 } 19019 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 19020 break 19021 } 19022 b = mergePoint(b, x0, x1) 19023 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 19024 v.reset(OpCopy) 19025 v.AddArg(v0) 19026 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 19027 v1.AuxInt = i0 19028 v1.Aux = s 19029 v1.AddArg(p) 19030 v1.AddArg(mem) 19031 v0.AddArg(v1) 19032 return true 19033 } 19034 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 19035 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 19036 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 19037 for { 19038 _ = v.Args[1] 19039 sh := v.Args[0] 19040 if sh.Op != OpAMD64SHLLconst { 19041 break 19042 } 19043 if sh.AuxInt != 16 { 19044 break 19045 } 19046 r0 := sh.Args[0] 19047 if r0.Op != OpAMD64ROLWconst { 19048 break 19049 } 19050 if r0.AuxInt != 8 { 19051 break 19052 } 19053 x0 := r0.Args[0] 19054 if x0.Op != OpAMD64MOVWload { 19055 break 19056 } 19057 i0 := x0.AuxInt 19058 s := x0.Aux 19059 _ = x0.Args[1] 19060 p := x0.Args[0] 19061 mem := x0.Args[1] 19062 r1 := v.Args[1] 19063 if r1.Op != OpAMD64ROLWconst { 19064 break 19065 } 19066 if r1.AuxInt != 8 { 19067 break 19068 } 19069 x1 := r1.Args[0] 19070 if x1.Op != OpAMD64MOVWload { 19071 break 19072 } 19073 i1 := x1.AuxInt 19074 if x1.Aux != s { 19075 break 19076 } 19077 _ = x1.Args[1] 19078 if p != x1.Args[0] { 19079 break 19080 } 
19081 if mem != x1.Args[1] { 19082 break 19083 } 19084 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 19085 break 19086 } 19087 b = mergePoint(b, x0, x1) 19088 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 19089 v.reset(OpCopy) 19090 v.AddArg(v0) 19091 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 19092 v1.AuxInt = i0 19093 v1.Aux = s 19094 v1.AddArg(p) 19095 v1.AddArg(mem) 19096 v0.AddArg(v1) 19097 return true 19098 } 19099 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 19100 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19101 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 19102 for { 19103 _ = v.Args[1] 19104 s0 := v.Args[0] 19105 if s0.Op != OpAMD64SHLLconst { 19106 break 19107 } 19108 j0 := s0.AuxInt 19109 x0 := s0.Args[0] 19110 if x0.Op != OpAMD64MOVBload { 19111 break 19112 } 19113 i0 := x0.AuxInt 19114 s := x0.Aux 19115 _ = x0.Args[1] 19116 p := x0.Args[0] 19117 mem := x0.Args[1] 19118 or := v.Args[1] 19119 if or.Op != OpAMD64ORL { 19120 break 19121 } 19122 _ = or.Args[1] 19123 s1 := or.Args[0] 19124 if s1.Op != OpAMD64SHLLconst { 19125 break 19126 } 19127 j1 := s1.AuxInt 19128 x1 := s1.Args[0] 19129 if x1.Op != OpAMD64MOVBload { 19130 break 19131 } 19132 i1 := x1.AuxInt 19133 if x1.Aux != s { 19134 break 19135 } 19136 _ = x1.Args[1] 19137 if p != x1.Args[0] { 19138 break 19139 } 19140 if mem != x1.Args[1] { 19141 break 19142 } 19143 y := or.Args[1] 19144 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19145 break 19146 } 19147 b = mergePoint(b, x0, x1) 19148 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19149 v.reset(OpCopy) 19150 v.AddArg(v0) 19151 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19152 v1.AuxInt = j1 19153 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 19154 v2.AuxInt = 8 19155 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 19156 v3.AuxInt = i0 19157 v3.Aux = s 19158 v3.AddArg(p) 19159 v3.AddArg(mem) 19160 v2.AddArg(v3) 19161 v1.AddArg(v2) 19162 v0.AddArg(v1) 19163 v0.AddArg(y) 19164 return true 19165 } 19166 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))) 19167 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19168 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 19169 for { 19170 _ = v.Args[1] 19171 s0 := v.Args[0] 19172 if s0.Op != OpAMD64SHLLconst { 19173 break 19174 } 19175 j0 := s0.AuxInt 19176 x0 := s0.Args[0] 19177 if x0.Op != OpAMD64MOVBload { 19178 break 19179 } 19180 i0 := x0.AuxInt 19181 s := x0.Aux 19182 _ = x0.Args[1] 19183 p := x0.Args[0] 19184 mem := x0.Args[1] 19185 or := v.Args[1] 19186 if or.Op != 
OpAMD64ORL { 19187 break 19188 } 19189 _ = or.Args[1] 19190 y := or.Args[0] 19191 s1 := or.Args[1] 19192 if s1.Op != OpAMD64SHLLconst { 19193 break 19194 } 19195 j1 := s1.AuxInt 19196 x1 := s1.Args[0] 19197 if x1.Op != OpAMD64MOVBload { 19198 break 19199 } 19200 i1 := x1.AuxInt 19201 if x1.Aux != s { 19202 break 19203 } 19204 _ = x1.Args[1] 19205 if p != x1.Args[0] { 19206 break 19207 } 19208 if mem != x1.Args[1] { 19209 break 19210 } 19211 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19212 break 19213 } 19214 b = mergePoint(b, x0, x1) 19215 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19216 v.reset(OpCopy) 19217 v.AddArg(v0) 19218 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19219 v1.AuxInt = j1 19220 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 19221 v2.AuxInt = 8 19222 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 19223 v3.AuxInt = i0 19224 v3.Aux = s 19225 v3.AddArg(p) 19226 v3.AddArg(mem) 19227 v2.AddArg(v3) 19228 v1.AddArg(v2) 19229 v0.AddArg(v1) 19230 v0.AddArg(y) 19231 return true 19232 } 19233 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 19234 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19235 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 19236 for { 19237 _ = v.Args[1] 19238 or := v.Args[0] 19239 if or.Op != OpAMD64ORL { 19240 break 19241 } 19242 _ = or.Args[1] 19243 s1 := or.Args[0] 19244 if s1.Op != OpAMD64SHLLconst { 19245 break 19246 } 19247 j1 := s1.AuxInt 19248 x1 := s1.Args[0] 19249 if x1.Op != OpAMD64MOVBload { 19250 break 19251 } 19252 i1 := x1.AuxInt 19253 s := x1.Aux 19254 _ = x1.Args[1] 19255 p := x1.Args[0] 19256 mem := x1.Args[1] 19257 y := or.Args[1] 19258 s0 := v.Args[1] 19259 if s0.Op != OpAMD64SHLLconst { 19260 break 19261 } 19262 j0 := s0.AuxInt 19263 x0 := s0.Args[0] 19264 if x0.Op != OpAMD64MOVBload { 19265 break 19266 } 19267 i0 := x0.AuxInt 19268 if x0.Aux != s { 19269 break 19270 } 19271 _ = x0.Args[1] 19272 if p != x0.Args[0] { 19273 break 19274 } 19275 if mem != x0.Args[1] { 19276 break 19277 } 19278 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19279 break 19280 } 19281 b = mergePoint(b, x0, x1) 19282 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19283 v.reset(OpCopy) 19284 v.AddArg(v0) 19285 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19286 v1.AuxInt = j1 19287 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 19288 v2.AuxInt = 8 19289 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 19290 v3.AuxInt = i0 19291 v3.Aux = s 19292 v3.AddArg(p) 19293 v3.AddArg(mem) 19294 v2.AddArg(v3) 19295 v1.AddArg(v2) 19296 v0.AddArg(v1) 19297 v0.AddArg(y) 19298 return true 19299 } 19300 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 19301 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && 
s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
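// Illustrative sketch, not generator output: the ROLWconst and BSWAPL rules
// in the functions above and below collapse bytewise big-endian loads,
// assembled from single-byte loads, shifts, and ORs, into one wide load
// plus a byte swap. The hypothetical helpers below (the names are examples
// only and not part of this file) show the source-level shape involved.
func exampleBEUint16(b []byte) uint16 {
	// Two byte loads combined with SHLLconst and ORL; with i1 == i0+1 the
	// rules rewrite this to a single 16-bit load followed by ROLWconst [8].
	return uint16(b[1]) | uint16(b[0])<<8
}

func exampleBEUint32(b []byte) uint32 {
	// Four byte loads; adjacent pairs merge into 16-bit loads, and the two
	// byte-swapped 16-bit halves then merge into MOVLload plus BSWAPL.
	return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}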
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
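// Illustrative sketch, not generator output: the SHLLconst/MOVBloadidx1
// rules above also fire when the adjacent byte loads sit inside a larger OR
// chain; the extra operand y stands for the not-yet-merged remainder of the
// chain, which is why the result keeps an outer ORL with y. A hypothetical
// source pattern with a variable index (the name is an example only):
func exampleBEUint32Idx(b []byte, i int) uint32 {
	// Adjacent pairs such as b[i+2]/b[i+3] can be fused into MOVWloadidx1
	// plus ROLWconst [8] while the remaining terms stay behind as y;
	// repeated application ends in MOVLloadidx1 plus BSWAPL.
	return uint32(b[i+3]) | uint32(b[i+2])<<8 | uint32(b[i+1])<<16 | uint32(b[i])<<24
}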
func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
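// Illustrative sketch, not generator output: the canMergeLoad rules above
// and below fold a MOVLload with a single use directly into the OR, in
// either operand order, producing one ORLmem (OR from memory) instead of a
// separate load and a register-register ORL. Hypothetical example:
func exampleORLmem(x uint32, p *uint32) uint32 {
	// Compiles to an OR with a memory operand rather than MOVL followed by
	// ORL, provided the load of *p has no other uses.
	return x | *p
}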
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
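		// The mask operand checked here computes, in effect,
		// SBBQcarrymask(CMPQconst [64] (NEGQ ((y&63)-64))): since 64-(y&63) is
		// below 64 exactly when y&63 != 0, the carry mask is all ones unless the
		// shift count 64-(y&63) reaches 64. This is how the lowered code
		// implements Go's "shift >= width yields 0" semantics for the
		// x >> (64-y&63) term; the rule proves the whole OR/AND/shift tree is
		// just a rotate and collapses it to ROLQ.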
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
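		// All ROLQ variants in this function and the next are operand-order
		// permutations of one idiom. A sketch of Go source that lowers to this
		// shape before rewriting (illustrative only; the ANDQconst [63] on the
		// left shift is stripped by an earlier rule because SHLQ masks its count):
		//
		//	func rotl(x uint64, k uint) uint64 {
		//		return x<<(k&63) | x>>(64-k&63) // becomes a single ROLQ
		//	}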
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
	// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
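		// This group merges two adjacent narrow loads OR'ed together in
		// little-endian order into one wide load: byte pairs become MOVWload,
		// word pairs MOVLload, and long pairs (here) MOVQload. mergePoint picks a
		// block in which both original loads are available to host the wide load,
		// and the Uses == 1 checks ensure the narrow values die afterwards. A
		// sketch of source the byte-sized rules match (illustrative only):
		//
		//	func load32(b []byte) uint32 {
		//		return uint32(b[0]) | uint32(b[1])<<8 |
		//			uint32(b[2])<<16 | uint32(b[3])<<24 // one MOVLload
		//	}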
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
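		// This family and its operand-order permutations handle partial merges
		// inside a larger OR tree: two shifted adjacent word loads at offsets j0
		// and j0+16 (j0 a multiple of 32) collapse into one shifted MOVLload, and
		// the unrelated operand y is re-attached to the rebuilt ORQ. Applied
		// repeatedly, byte pairs become words and word pairs become longs, until
		// a full little-endian 8-byte assembly is a single MOVQload (e.g. the
		// uint64(b[4])<<32 | uint64(b[5])<<40 terms of an 8-byte load correspond
		// to j0 == 32 here).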
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
{ 23991 break 23992 } 23993 i0 := x0.AuxInt 23994 if x0.Aux != s { 23995 break 23996 } 23997 _ = x0.Args[2] 23998 if p != x0.Args[0] { 23999 break 24000 } 24001 if idx != x0.Args[1] { 24002 break 24003 } 24004 if mem != x0.Args[2] { 24005 break 24006 } 24007 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24008 break 24009 } 24010 b = mergePoint(b, x0, x1) 24011 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 24012 v.reset(OpCopy) 24013 v.AddArg(v0) 24014 v0.AuxInt = i0 24015 v0.Aux = s 24016 v0.AddArg(p) 24017 v0.AddArg(idx) 24018 v0.AddArg(mem) 24019 return true 24020 } 24021 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 24022 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24023 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 24024 for { 24025 _ = v.Args[1] 24026 sh := v.Args[0] 24027 if sh.Op != OpAMD64SHLQconst { 24028 break 24029 } 24030 if sh.AuxInt != 8 { 24031 break 24032 } 24033 x1 := sh.Args[0] 24034 if x1.Op != OpAMD64MOVBloadidx1 { 24035 break 24036 } 24037 i1 := x1.AuxInt 24038 s := x1.Aux 24039 _ = x1.Args[2] 24040 p := x1.Args[0] 24041 idx := x1.Args[1] 24042 mem := x1.Args[2] 24043 x0 := v.Args[1] 24044 if x0.Op != OpAMD64MOVBloadidx1 { 24045 break 24046 } 24047 i0 := x0.AuxInt 24048 if x0.Aux != s { 24049 break 24050 } 24051 _ = x0.Args[2] 24052 if idx != x0.Args[0] { 24053 break 24054 } 24055 if p != x0.Args[1] { 24056 break 24057 } 24058 if mem != x0.Args[2] { 24059 break 24060 } 24061 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24062 break 24063 } 24064 b = mergePoint(b, x0, x1) 24065 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 24066 v.reset(OpCopy) 24067 v.AddArg(v0) 24068 v0.AuxInt = i0 24069 v0.Aux = s 24070 v0.AddArg(p) 24071 v0.AddArg(idx) 24072 v0.AddArg(mem) 24073 return true 24074 } 24075 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 24076 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24077 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 24078 for { 24079 _ = v.Args[1] 24080 sh := v.Args[0] 24081 if sh.Op != OpAMD64SHLQconst { 24082 break 24083 } 24084 if sh.AuxInt != 8 { 24085 break 24086 } 24087 x1 := sh.Args[0] 24088 if x1.Op != OpAMD64MOVBloadidx1 { 24089 break 24090 } 24091 i1 := x1.AuxInt 24092 s := x1.Aux 24093 _ = x1.Args[2] 24094 idx := x1.Args[0] 24095 p := x1.Args[1] 24096 mem := x1.Args[2] 24097 x0 := v.Args[1] 24098 if x0.Op != OpAMD64MOVBloadidx1 { 24099 break 24100 } 24101 i0 := x0.AuxInt 24102 if x0.Aux != s { 24103 break 24104 } 24105 _ = x0.Args[2] 24106 if idx != x0.Args[0] { 24107 break 24108 } 24109 if p != x0.Args[1] { 24110 break 24111 } 24112 if mem != x0.Args[2] { 24113 break 24114 } 24115 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24116 break 24117 } 24118 b = mergePoint(b, x0, x1) 24119 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 24120 v.reset(OpCopy) 24121 v.AddArg(v0) 24122 v0.AuxInt = i0 24123 v0.Aux = s 24124 v0.AddArg(p) 24125 v0.AddArg(idx) 24126 
v0.AddArg(mem) 24127 return true 24128 } 24129 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 24130 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24131 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24132 for { 24133 _ = v.Args[1] 24134 x0 := v.Args[0] 24135 if x0.Op != OpAMD64MOVWloadidx1 { 24136 break 24137 } 24138 i0 := x0.AuxInt 24139 s := x0.Aux 24140 _ = x0.Args[2] 24141 p := x0.Args[0] 24142 idx := x0.Args[1] 24143 mem := x0.Args[2] 24144 sh := v.Args[1] 24145 if sh.Op != OpAMD64SHLQconst { 24146 break 24147 } 24148 if sh.AuxInt != 16 { 24149 break 24150 } 24151 x1 := sh.Args[0] 24152 if x1.Op != OpAMD64MOVWloadidx1 { 24153 break 24154 } 24155 i1 := x1.AuxInt 24156 if x1.Aux != s { 24157 break 24158 } 24159 _ = x1.Args[2] 24160 if p != x1.Args[0] { 24161 break 24162 } 24163 if idx != x1.Args[1] { 24164 break 24165 } 24166 if mem != x1.Args[2] { 24167 break 24168 } 24169 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24170 break 24171 } 24172 b = mergePoint(b, x0, x1) 24173 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 24174 v.reset(OpCopy) 24175 v.AddArg(v0) 24176 v0.AuxInt = i0 24177 v0.Aux = s 24178 v0.AddArg(p) 24179 v0.AddArg(idx) 24180 v0.AddArg(mem) 24181 return true 24182 } 24183 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 24184 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24185 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24186 for { 24187 _ = v.Args[1] 24188 x0 := v.Args[0] 24189 if x0.Op != OpAMD64MOVWloadidx1 { 24190 break 24191 } 24192 i0 := x0.AuxInt 24193 s := x0.Aux 24194 _ = x0.Args[2] 24195 idx := x0.Args[0] 24196 p := x0.Args[1] 24197 mem := x0.Args[2] 24198 sh := v.Args[1] 24199 if sh.Op != OpAMD64SHLQconst { 24200 break 24201 } 24202 if sh.AuxInt != 16 { 24203 break 24204 } 24205 x1 := sh.Args[0] 24206 if x1.Op != OpAMD64MOVWloadidx1 { 24207 break 24208 } 24209 i1 := x1.AuxInt 24210 if x1.Aux != s { 24211 break 24212 } 24213 _ = x1.Args[2] 24214 if p != x1.Args[0] { 24215 break 24216 } 24217 if idx != x1.Args[1] { 24218 break 24219 } 24220 if mem != x1.Args[2] { 24221 break 24222 } 24223 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24224 break 24225 } 24226 b = mergePoint(b, x0, x1) 24227 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 24228 v.reset(OpCopy) 24229 v.AddArg(v0) 24230 v0.AuxInt = i0 24231 v0.Aux = s 24232 v0.AddArg(p) 24233 v0.AddArg(idx) 24234 v0.AddArg(mem) 24235 return true 24236 } 24237 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 24238 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24239 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24240 for { 24241 _ = v.Args[1] 24242 x0 := v.Args[0] 24243 if x0.Op != OpAMD64MOVWloadidx1 { 24244 break 24245 } 24246 i0 := x0.AuxInt 24247 s := x0.Aux 24248 _ = x0.Args[2] 24249 p := x0.Args[0] 24250 idx := x0.Args[1] 24251 mem := x0.Args[2] 24252 sh := v.Args[1] 
24253 if sh.Op != OpAMD64SHLQconst { 24254 break 24255 } 24256 if sh.AuxInt != 16 { 24257 break 24258 } 24259 x1 := sh.Args[0] 24260 if x1.Op != OpAMD64MOVWloadidx1 { 24261 break 24262 } 24263 i1 := x1.AuxInt 24264 if x1.Aux != s { 24265 break 24266 } 24267 _ = x1.Args[2] 24268 if idx != x1.Args[0] { 24269 break 24270 } 24271 if p != x1.Args[1] { 24272 break 24273 } 24274 if mem != x1.Args[2] { 24275 break 24276 } 24277 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24278 break 24279 } 24280 b = mergePoint(b, x0, x1) 24281 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 24282 v.reset(OpCopy) 24283 v.AddArg(v0) 24284 v0.AuxInt = i0 24285 v0.Aux = s 24286 v0.AddArg(p) 24287 v0.AddArg(idx) 24288 v0.AddArg(mem) 24289 return true 24290 } 24291 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 24292 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24293 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24294 for { 24295 _ = v.Args[1] 24296 x0 := v.Args[0] 24297 if x0.Op != OpAMD64MOVWloadidx1 { 24298 break 24299 } 24300 i0 := x0.AuxInt 24301 s := x0.Aux 24302 _ = x0.Args[2] 24303 idx := x0.Args[0] 24304 p := x0.Args[1] 24305 mem := x0.Args[2] 24306 sh := v.Args[1] 24307 if sh.Op != OpAMD64SHLQconst { 24308 break 24309 } 24310 if sh.AuxInt != 16 { 24311 break 24312 } 24313 x1 := sh.Args[0] 24314 if x1.Op != OpAMD64MOVWloadidx1 { 24315 break 24316 } 24317 i1 := x1.AuxInt 24318 if x1.Aux != s { 24319 break 24320 } 24321 _ = x1.Args[2] 24322 if idx != x1.Args[0] { 24323 break 24324 } 24325 if p != x1.Args[1] { 24326 break 24327 } 24328 if mem != x1.Args[2] { 24329 break 24330 } 24331 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24332 break 24333 } 24334 b = mergePoint(b, x0, x1) 24335 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 24336 v.reset(OpCopy) 24337 v.AddArg(v0) 24338 v0.AuxInt = i0 24339 v0.Aux = s 24340 v0.AddArg(p) 24341 v0.AddArg(idx) 24342 v0.AddArg(mem) 24343 return true 24344 } 24345 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 24346 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24347 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24348 for { 24349 _ = v.Args[1] 24350 sh := v.Args[0] 24351 if sh.Op != OpAMD64SHLQconst { 24352 break 24353 } 24354 if sh.AuxInt != 16 { 24355 break 24356 } 24357 x1 := sh.Args[0] 24358 if x1.Op != OpAMD64MOVWloadidx1 { 24359 break 24360 } 24361 i1 := x1.AuxInt 24362 s := x1.Aux 24363 _ = x1.Args[2] 24364 p := x1.Args[0] 24365 idx := x1.Args[1] 24366 mem := x1.Args[2] 24367 x0 := v.Args[1] 24368 if x0.Op != OpAMD64MOVWloadidx1 { 24369 break 24370 } 24371 i0 := x0.AuxInt 24372 if x0.Aux != s { 24373 break 24374 } 24375 _ = x0.Args[2] 24376 if p != x0.Args[0] { 24377 break 24378 } 24379 if idx != x0.Args[1] { 24380 break 24381 } 24382 if mem != x0.Args[2] { 24383 break 24384 } 24385 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24386 break 24387 } 24388 b = mergePoint(b, x0, x1) 24389 v0 := 
b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 24390 v.reset(OpCopy) 24391 v.AddArg(v0) 24392 v0.AuxInt = i0 24393 v0.Aux = s 24394 v0.AddArg(p) 24395 v0.AddArg(idx) 24396 v0.AddArg(mem) 24397 return true 24398 } 24399 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 24400 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24401 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24402 for { 24403 _ = v.Args[1] 24404 sh := v.Args[0] 24405 if sh.Op != OpAMD64SHLQconst { 24406 break 24407 } 24408 if sh.AuxInt != 16 { 24409 break 24410 } 24411 x1 := sh.Args[0] 24412 if x1.Op != OpAMD64MOVWloadidx1 { 24413 break 24414 } 24415 i1 := x1.AuxInt 24416 s := x1.Aux 24417 _ = x1.Args[2] 24418 idx := x1.Args[0] 24419 p := x1.Args[1] 24420 mem := x1.Args[2] 24421 x0 := v.Args[1] 24422 if x0.Op != OpAMD64MOVWloadidx1 { 24423 break 24424 } 24425 i0 := x0.AuxInt 24426 if x0.Aux != s { 24427 break 24428 } 24429 _ = x0.Args[2] 24430 if p != x0.Args[0] { 24431 break 24432 } 24433 if idx != x0.Args[1] { 24434 break 24435 } 24436 if mem != x0.Args[2] { 24437 break 24438 } 24439 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24440 break 24441 } 24442 b = mergePoint(b, x0, x1) 24443 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 24444 v.reset(OpCopy) 24445 v.AddArg(v0) 24446 v0.AuxInt = i0 24447 v0.Aux = s 24448 v0.AddArg(p) 24449 v0.AddArg(idx) 24450 v0.AddArg(mem) 24451 return true 24452 } 24453 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 24454 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24455 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24456 for { 24457 _ = v.Args[1] 24458 sh := v.Args[0] 24459 if sh.Op != OpAMD64SHLQconst { 24460 break 24461 } 24462 if sh.AuxInt != 16 { 24463 break 24464 } 24465 x1 := sh.Args[0] 24466 if x1.Op != OpAMD64MOVWloadidx1 { 24467 break 24468 } 24469 i1 := x1.AuxInt 24470 s := x1.Aux 24471 _ = x1.Args[2] 24472 p := x1.Args[0] 24473 idx := x1.Args[1] 24474 mem := x1.Args[2] 24475 x0 := v.Args[1] 24476 if x0.Op != OpAMD64MOVWloadidx1 { 24477 break 24478 } 24479 i0 := x0.AuxInt 24480 if x0.Aux != s { 24481 break 24482 } 24483 _ = x0.Args[2] 24484 if idx != x0.Args[0] { 24485 break 24486 } 24487 if p != x0.Args[1] { 24488 break 24489 } 24490 if mem != x0.Args[2] { 24491 break 24492 } 24493 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24494 break 24495 } 24496 b = mergePoint(b, x0, x1) 24497 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 24498 v.reset(OpCopy) 24499 v.AddArg(v0) 24500 v0.AuxInt = i0 24501 v0.Aux = s 24502 v0.AddArg(p) 24503 v0.AddArg(idx) 24504 v0.AddArg(mem) 24505 return true 24506 } 24507 return false 24508 } 24509 func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool { 24510 b := v.Block 24511 _ = b 24512 typ := &b.Func.Config.Types 24513 _ = typ 24514 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 24515 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(sh) 24516 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24517 for { 24518 _ = v.Args[1] 24519 sh := v.Args[0] 24520 if sh.Op != OpAMD64SHLQconst { 24521 break 24522 } 24523 if sh.AuxInt != 16 { 24524 break 24525 } 24526 x1 := sh.Args[0] 24527 if x1.Op != OpAMD64MOVWloadidx1 { 24528 break 24529 } 24530 i1 := x1.AuxInt 24531 s := x1.Aux 24532 _ = x1.Args[2] 24533 idx := x1.Args[0] 24534 p := x1.Args[1] 24535 mem := x1.Args[2] 24536 x0 := v.Args[1] 24537 if x0.Op != OpAMD64MOVWloadidx1 { 24538 break 24539 } 24540 i0 := x0.AuxInt 24541 if x0.Aux != s { 24542 break 24543 } 24544 _ = x0.Args[2] 24545 if idx != x0.Args[0] { 24546 break 24547 } 24548 if p != x0.Args[1] { 24549 break 24550 } 24551 if mem != x0.Args[2] { 24552 break 24553 } 24554 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24555 break 24556 } 24557 b = mergePoint(b, x0, x1) 24558 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 24559 v.reset(OpCopy) 24560 v.AddArg(v0) 24561 v0.AuxInt = i0 24562 v0.Aux = s 24563 v0.AddArg(p) 24564 v0.AddArg(idx) 24565 v0.AddArg(mem) 24566 return true 24567 } 24568 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) 24569 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24570 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 24571 for { 24572 _ = v.Args[1] 24573 x0 := v.Args[0] 24574 if x0.Op != OpAMD64MOVLloadidx1 { 24575 break 24576 } 24577 i0 := x0.AuxInt 24578 s := x0.Aux 24579 _ = x0.Args[2] 24580 p := x0.Args[0] 24581 idx := x0.Args[1] 24582 mem := x0.Args[2] 24583 sh := v.Args[1] 24584 if sh.Op != OpAMD64SHLQconst { 24585 break 24586 } 24587 if sh.AuxInt != 32 { 24588 break 24589 } 24590 x1 := sh.Args[0] 24591 if x1.Op != OpAMD64MOVLloadidx1 { 24592 break 24593 } 24594 i1 := x1.AuxInt 24595 if x1.Aux != s { 24596 break 24597 } 24598 _ = x1.Args[2] 24599 if p != x1.Args[0] { 24600 break 24601 } 24602 if idx != x1.Args[1] { 24603 break 24604 } 24605 if mem != x1.Args[2] { 24606 break 24607 } 24608 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24609 break 24610 } 24611 b = mergePoint(b, x0, x1) 24612 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 24613 v.reset(OpCopy) 24614 v.AddArg(v0) 24615 v0.AuxInt = i0 24616 v0.Aux = s 24617 v0.AddArg(p) 24618 v0.AddArg(idx) 24619 v0.AddArg(mem) 24620 return true 24621 } 24622 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) 24623 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24624 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 24625 for { 24626 _ = v.Args[1] 24627 x0 := v.Args[0] 24628 if x0.Op != OpAMD64MOVLloadidx1 { 24629 break 24630 } 24631 i0 := x0.AuxInt 24632 s := x0.Aux 24633 _ = x0.Args[2] 24634 idx := x0.Args[0] 24635 p := x0.Args[1] 24636 mem := x0.Args[2] 24637 sh := v.Args[1] 24638 if sh.Op != OpAMD64SHLQconst { 24639 break 24640 } 24641 if sh.AuxInt != 32 { 24642 break 24643 } 24644 x1 := sh.Args[0] 24645 if x1.Op != OpAMD64MOVLloadidx1 { 24646 break 24647 } 24648 i1 := x1.AuxInt 24649 if x1.Aux != s { 24650 break 24651 } 24652 _ = x1.Args[2] 24653 if p != 
x1.Args[0] { 24654 break 24655 } 24656 if idx != x1.Args[1] { 24657 break 24658 } 24659 if mem != x1.Args[2] { 24660 break 24661 } 24662 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24663 break 24664 } 24665 b = mergePoint(b, x0, x1) 24666 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 24667 v.reset(OpCopy) 24668 v.AddArg(v0) 24669 v0.AuxInt = i0 24670 v0.Aux = s 24671 v0.AddArg(p) 24672 v0.AddArg(idx) 24673 v0.AddArg(mem) 24674 return true 24675 } 24676 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) 24677 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24678 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 24679 for { 24680 _ = v.Args[1] 24681 x0 := v.Args[0] 24682 if x0.Op != OpAMD64MOVLloadidx1 { 24683 break 24684 } 24685 i0 := x0.AuxInt 24686 s := x0.Aux 24687 _ = x0.Args[2] 24688 p := x0.Args[0] 24689 idx := x0.Args[1] 24690 mem := x0.Args[2] 24691 sh := v.Args[1] 24692 if sh.Op != OpAMD64SHLQconst { 24693 break 24694 } 24695 if sh.AuxInt != 32 { 24696 break 24697 } 24698 x1 := sh.Args[0] 24699 if x1.Op != OpAMD64MOVLloadidx1 { 24700 break 24701 } 24702 i1 := x1.AuxInt 24703 if x1.Aux != s { 24704 break 24705 } 24706 _ = x1.Args[2] 24707 if idx != x1.Args[0] { 24708 break 24709 } 24710 if p != x1.Args[1] { 24711 break 24712 } 24713 if mem != x1.Args[2] { 24714 break 24715 } 24716 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24717 break 24718 } 24719 b = mergePoint(b, x0, x1) 24720 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 24721 v.reset(OpCopy) 24722 v.AddArg(v0) 24723 v0.AuxInt = i0 24724 v0.Aux = s 24725 v0.AddArg(p) 24726 v0.AddArg(idx) 24727 v0.AddArg(mem) 24728 return true 24729 } 24730 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) 24731 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24732 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 24733 for { 24734 _ = v.Args[1] 24735 x0 := v.Args[0] 24736 if x0.Op != OpAMD64MOVLloadidx1 { 24737 break 24738 } 24739 i0 := x0.AuxInt 24740 s := x0.Aux 24741 _ = x0.Args[2] 24742 idx := x0.Args[0] 24743 p := x0.Args[1] 24744 mem := x0.Args[2] 24745 sh := v.Args[1] 24746 if sh.Op != OpAMD64SHLQconst { 24747 break 24748 } 24749 if sh.AuxInt != 32 { 24750 break 24751 } 24752 x1 := sh.Args[0] 24753 if x1.Op != OpAMD64MOVLloadidx1 { 24754 break 24755 } 24756 i1 := x1.AuxInt 24757 if x1.Aux != s { 24758 break 24759 } 24760 _ = x1.Args[2] 24761 if idx != x1.Args[0] { 24762 break 24763 } 24764 if p != x1.Args[1] { 24765 break 24766 } 24767 if mem != x1.Args[2] { 24768 break 24769 } 24770 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24771 break 24772 } 24773 b = mergePoint(b, x0, x1) 24774 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 24775 v.reset(OpCopy) 24776 v.AddArg(v0) 24777 v0.AuxInt = i0 24778 v0.Aux = s 24779 v0.AddArg(p) 24780 v0.AddArg(idx) 24781 v0.AddArg(mem) 24782 return true 24783 } 24784 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) 
x0:(MOVLloadidx1 [i0] {s} p idx mem)) 24785 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24786 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 24787 for { 24788 _ = v.Args[1] 24789 sh := v.Args[0] 24790 if sh.Op != OpAMD64SHLQconst { 24791 break 24792 } 24793 if sh.AuxInt != 32 { 24794 break 24795 } 24796 x1 := sh.Args[0] 24797 if x1.Op != OpAMD64MOVLloadidx1 { 24798 break 24799 } 24800 i1 := x1.AuxInt 24801 s := x1.Aux 24802 _ = x1.Args[2] 24803 p := x1.Args[0] 24804 idx := x1.Args[1] 24805 mem := x1.Args[2] 24806 x0 := v.Args[1] 24807 if x0.Op != OpAMD64MOVLloadidx1 { 24808 break 24809 } 24810 i0 := x0.AuxInt 24811 if x0.Aux != s { 24812 break 24813 } 24814 _ = x0.Args[2] 24815 if p != x0.Args[0] { 24816 break 24817 } 24818 if idx != x0.Args[1] { 24819 break 24820 } 24821 if mem != x0.Args[2] { 24822 break 24823 } 24824 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24825 break 24826 } 24827 b = mergePoint(b, x0, x1) 24828 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 24829 v.reset(OpCopy) 24830 v.AddArg(v0) 24831 v0.AuxInt = i0 24832 v0.Aux = s 24833 v0.AddArg(p) 24834 v0.AddArg(idx) 24835 v0.AddArg(mem) 24836 return true 24837 } 24838 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) 24839 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24840 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 24841 for { 24842 _ = v.Args[1] 24843 sh := v.Args[0] 24844 if sh.Op != OpAMD64SHLQconst { 24845 break 24846 } 24847 if sh.AuxInt != 32 { 24848 break 24849 } 24850 x1 := sh.Args[0] 24851 if x1.Op != OpAMD64MOVLloadidx1 { 24852 break 24853 } 24854 i1 := x1.AuxInt 24855 s := x1.Aux 24856 _ = x1.Args[2] 24857 idx := x1.Args[0] 24858 p := x1.Args[1] 24859 mem := x1.Args[2] 24860 x0 := v.Args[1] 24861 if x0.Op != OpAMD64MOVLloadidx1 { 24862 break 24863 } 24864 i0 := x0.AuxInt 24865 if x0.Aux != s { 24866 break 24867 } 24868 _ = x0.Args[2] 24869 if p != x0.Args[0] { 24870 break 24871 } 24872 if idx != x0.Args[1] { 24873 break 24874 } 24875 if mem != x0.Args[2] { 24876 break 24877 } 24878 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24879 break 24880 } 24881 b = mergePoint(b, x0, x1) 24882 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 24883 v.reset(OpCopy) 24884 v.AddArg(v0) 24885 v0.AuxInt = i0 24886 v0.Aux = s 24887 v0.AddArg(p) 24888 v0.AddArg(idx) 24889 v0.AddArg(mem) 24890 return true 24891 } 24892 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 24893 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24894 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 24895 for { 24896 _ = v.Args[1] 24897 sh := v.Args[0] 24898 if sh.Op != OpAMD64SHLQconst { 24899 break 24900 } 24901 if sh.AuxInt != 32 { 24902 break 24903 } 24904 x1 := sh.Args[0] 24905 if x1.Op != OpAMD64MOVLloadidx1 { 24906 break 24907 } 24908 i1 := x1.AuxInt 24909 s := x1.Aux 24910 _ = x1.Args[2] 24911 p := x1.Args[0] 24912 idx := x1.Args[1] 24913 mem := x1.Args[2] 24914 x0 := 
v.Args[1] 24915 if x0.Op != OpAMD64MOVLloadidx1 { 24916 break 24917 } 24918 i0 := x0.AuxInt 24919 if x0.Aux != s { 24920 break 24921 } 24922 _ = x0.Args[2] 24923 if idx != x0.Args[0] { 24924 break 24925 } 24926 if p != x0.Args[1] { 24927 break 24928 } 24929 if mem != x0.Args[2] { 24930 break 24931 } 24932 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24933 break 24934 } 24935 b = mergePoint(b, x0, x1) 24936 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 24937 v.reset(OpCopy) 24938 v.AddArg(v0) 24939 v0.AuxInt = i0 24940 v0.Aux = s 24941 v0.AddArg(p) 24942 v0.AddArg(idx) 24943 v0.AddArg(mem) 24944 return true 24945 } 24946 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 24947 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24948 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 24949 for { 24950 _ = v.Args[1] 24951 sh := v.Args[0] 24952 if sh.Op != OpAMD64SHLQconst { 24953 break 24954 } 24955 if sh.AuxInt != 32 { 24956 break 24957 } 24958 x1 := sh.Args[0] 24959 if x1.Op != OpAMD64MOVLloadidx1 { 24960 break 24961 } 24962 i1 := x1.AuxInt 24963 s := x1.Aux 24964 _ = x1.Args[2] 24965 idx := x1.Args[0] 24966 p := x1.Args[1] 24967 mem := x1.Args[2] 24968 x0 := v.Args[1] 24969 if x0.Op != OpAMD64MOVLloadidx1 { 24970 break 24971 } 24972 i0 := x0.AuxInt 24973 if x0.Aux != s { 24974 break 24975 } 24976 _ = x0.Args[2] 24977 if idx != x0.Args[0] { 24978 break 24979 } 24980 if p != x0.Args[1] { 24981 break 24982 } 24983 if mem != x0.Args[2] { 24984 break 24985 } 24986 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24987 break 24988 } 24989 b = mergePoint(b, x0, x1) 24990 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 24991 v.reset(OpCopy) 24992 v.AddArg(v0) 24993 v0.AuxInt = i0 24994 v0.Aux = s 24995 v0.AddArg(p) 24996 v0.AddArg(idx) 24997 v0.AddArg(mem) 24998 return true 24999 } 25000 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 25001 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25002 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 25003 for { 25004 _ = v.Args[1] 25005 s1 := v.Args[0] 25006 if s1.Op != OpAMD64SHLQconst { 25007 break 25008 } 25009 j1 := s1.AuxInt 25010 x1 := s1.Args[0] 25011 if x1.Op != OpAMD64MOVBloadidx1 { 25012 break 25013 } 25014 i1 := x1.AuxInt 25015 s := x1.Aux 25016 _ = x1.Args[2] 25017 p := x1.Args[0] 25018 idx := x1.Args[1] 25019 mem := x1.Args[2] 25020 or := v.Args[1] 25021 if or.Op != OpAMD64ORQ { 25022 break 25023 } 25024 _ = or.Args[1] 25025 s0 := or.Args[0] 25026 if s0.Op != OpAMD64SHLQconst { 25027 break 25028 } 25029 j0 := s0.AuxInt 25030 x0 := s0.Args[0] 25031 if x0.Op != OpAMD64MOVBloadidx1 { 25032 break 25033 } 25034 i0 := x0.AuxInt 25035 if x0.Aux != s { 25036 break 25037 } 25038 _ = x0.Args[2] 25039 if p != x0.Args[0] { 25040 break 25041 } 25042 if idx != x0.Args[1] { 25043 break 25044 } 25045 if mem != x0.Args[2] { 25046 break 25047 } 25048 y 
:= or.Args[1] 25049 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25050 break 25051 } 25052 b = mergePoint(b, x0, x1) 25053 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25054 v.reset(OpCopy) 25055 v.AddArg(v0) 25056 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25057 v1.AuxInt = j0 25058 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 25059 v2.AuxInt = i0 25060 v2.Aux = s 25061 v2.AddArg(p) 25062 v2.AddArg(idx) 25063 v2.AddArg(mem) 25064 v1.AddArg(v2) 25065 v0.AddArg(v1) 25066 v0.AddArg(y) 25067 return true 25068 } 25069 return false 25070 } 25071 func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool { 25072 b := v.Block 25073 _ = b 25074 typ := &b.Func.Config.Types 25075 _ = typ 25076 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 25077 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25078 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 25079 for { 25080 _ = v.Args[1] 25081 s1 := v.Args[0] 25082 if s1.Op != OpAMD64SHLQconst { 25083 break 25084 } 25085 j1 := s1.AuxInt 25086 x1 := s1.Args[0] 25087 if x1.Op != OpAMD64MOVBloadidx1 { 25088 break 25089 } 25090 i1 := x1.AuxInt 25091 s := x1.Aux 25092 _ = x1.Args[2] 25093 idx := x1.Args[0] 25094 p := x1.Args[1] 25095 mem := x1.Args[2] 25096 or := v.Args[1] 25097 if or.Op != OpAMD64ORQ { 25098 break 25099 } 25100 _ = or.Args[1] 25101 s0 := or.Args[0] 25102 if s0.Op != OpAMD64SHLQconst { 25103 break 25104 } 25105 j0 := s0.AuxInt 25106 x0 := s0.Args[0] 25107 if x0.Op != OpAMD64MOVBloadidx1 { 25108 break 25109 } 25110 i0 := x0.AuxInt 25111 if x0.Aux != s { 25112 break 25113 } 25114 _ = x0.Args[2] 25115 if p != x0.Args[0] { 25116 break 25117 } 25118 if idx != x0.Args[1] { 25119 break 25120 } 25121 if mem != x0.Args[2] { 25122 break 25123 } 25124 y := or.Args[1] 25125 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25126 break 25127 } 25128 b = mergePoint(b, x0, x1) 25129 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25130 v.reset(OpCopy) 25131 v.AddArg(v0) 25132 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25133 v1.AuxInt = j0 25134 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 25135 v2.AuxInt = i0 25136 v2.Aux = s 25137 v2.AddArg(p) 25138 v2.AddArg(idx) 25139 v2.AddArg(mem) 25140 v1.AddArg(v2) 25141 v0.AddArg(v1) 25142 v0.AddArg(y) 25143 return true 25144 } 25145 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 25146 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25147 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 25148 for { 25149 _ = v.Args[1] 25150 s1 := v.Args[0] 25151 if s1.Op 
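// The ORQ rules in the surrounding functions implement little-endian load
// combining for indexed addressing: adjacent narrow loads joined by ORQ and
// SHLQconst are replaced at mergePoint(b, x0, x1) with a single wider load
// (MOVBloadidx1 pairs become MOVWloadidx1, MOVWloadidx1 pairs become
// MOVLloadidx1, and MOVLloadidx1 pairs become MOVQloadidx1). A minimal
// sketch of Go source that lowers to the matched SSA shape (the helper name
// is hypothetical, not part of this generated file):
//
//	func read64(b []byte, i int) uint64 {
//		// Eight MOVBloadidx1 values combined by an ORQ/SHLQconst tree;
//		// when the offsets are adjacent, each load and shift has a
//		// single use, and a merge point exists, the rules collapse the
//		// tree into one MOVQloadidx1.
//		return uint64(b[i]) | uint64(b[i+1])<<8 | uint64(b[i+2])<<16 |
//			uint64(b[i+3])<<24 | uint64(b[i+4])<<32 | uint64(b[i+5])<<40 |
//			uint64(b[i+6])<<48 | uint64(b[i+7])<<56
//	}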
func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) &&
clobber(s0) && clobber(s1) && clobber(or) 26327 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26328 for { 26329 _ = v.Args[1] 26330 s1 := v.Args[0] 26331 if s1.Op != OpAMD64SHLQconst { 26332 break 26333 } 26334 j1 := s1.AuxInt 26335 x1 := s1.Args[0] 26336 if x1.Op != OpAMD64MOVWloadidx1 { 26337 break 26338 } 26339 i1 := x1.AuxInt 26340 s := x1.Aux 26341 _ = x1.Args[2] 26342 idx := x1.Args[0] 26343 p := x1.Args[1] 26344 mem := x1.Args[2] 26345 or := v.Args[1] 26346 if or.Op != OpAMD64ORQ { 26347 break 26348 } 26349 _ = or.Args[1] 26350 s0 := or.Args[0] 26351 if s0.Op != OpAMD64SHLQconst { 26352 break 26353 } 26354 j0 := s0.AuxInt 26355 x0 := s0.Args[0] 26356 if x0.Op != OpAMD64MOVWloadidx1 { 26357 break 26358 } 26359 i0 := x0.AuxInt 26360 if x0.Aux != s { 26361 break 26362 } 26363 _ = x0.Args[2] 26364 if idx != x0.Args[0] { 26365 break 26366 } 26367 if p != x0.Args[1] { 26368 break 26369 } 26370 if mem != x0.Args[2] { 26371 break 26372 } 26373 y := or.Args[1] 26374 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26375 break 26376 } 26377 b = mergePoint(b, x0, x1) 26378 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26379 v.reset(OpCopy) 26380 v.AddArg(v0) 26381 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26382 v1.AuxInt = j0 26383 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26384 v2.AuxInt = i0 26385 v2.Aux = s 26386 v2.AddArg(p) 26387 v2.AddArg(idx) 26388 v2.AddArg(mem) 26389 v1.AddArg(v2) 26390 v0.AddArg(v1) 26391 v0.AddArg(y) 26392 return true 26393 } 26394 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 26395 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26396 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26397 for { 26398 _ = v.Args[1] 26399 s1 := v.Args[0] 26400 if s1.Op != OpAMD64SHLQconst { 26401 break 26402 } 26403 j1 := s1.AuxInt 26404 x1 := s1.Args[0] 26405 if x1.Op != OpAMD64MOVWloadidx1 { 26406 break 26407 } 26408 i1 := x1.AuxInt 26409 s := x1.Aux 26410 _ = x1.Args[2] 26411 p := x1.Args[0] 26412 idx := x1.Args[1] 26413 mem := x1.Args[2] 26414 or := v.Args[1] 26415 if or.Op != OpAMD64ORQ { 26416 break 26417 } 26418 _ = or.Args[1] 26419 y := or.Args[0] 26420 s0 := or.Args[1] 26421 if s0.Op != OpAMD64SHLQconst { 26422 break 26423 } 26424 j0 := s0.AuxInt 26425 x0 := s0.Args[0] 26426 if x0.Op != OpAMD64MOVWloadidx1 { 26427 break 26428 } 26429 i0 := x0.AuxInt 26430 if x0.Aux != s { 26431 break 26432 } 26433 _ = x0.Args[2] 26434 if p != x0.Args[0] { 26435 break 26436 } 26437 if idx != x0.Args[1] { 26438 break 26439 } 26440 if mem != x0.Args[2] { 26441 break 26442 } 26443 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26444 break 26445 } 26446 b = mergePoint(b, x0, x1) 26447 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26448 v.reset(OpCopy) 26449 v.AddArg(v0) 26450 v1 := b.NewValue0(v.Pos, 
OpAMD64SHLQconst, v.Type) 26451 v1.AuxInt = j0 26452 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26453 v2.AuxInt = i0 26454 v2.Aux = s 26455 v2.AddArg(p) 26456 v2.AddArg(idx) 26457 v2.AddArg(mem) 26458 v1.AddArg(v2) 26459 v0.AddArg(v1) 26460 v0.AddArg(y) 26461 return true 26462 } 26463 return false 26464 } 26465 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { 26466 b := v.Block 26467 _ = b 26468 typ := &b.Func.Config.Types 26469 _ = typ 26470 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 26471 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26472 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26473 for { 26474 _ = v.Args[1] 26475 s1 := v.Args[0] 26476 if s1.Op != OpAMD64SHLQconst { 26477 break 26478 } 26479 j1 := s1.AuxInt 26480 x1 := s1.Args[0] 26481 if x1.Op != OpAMD64MOVWloadidx1 { 26482 break 26483 } 26484 i1 := x1.AuxInt 26485 s := x1.Aux 26486 _ = x1.Args[2] 26487 idx := x1.Args[0] 26488 p := x1.Args[1] 26489 mem := x1.Args[2] 26490 or := v.Args[1] 26491 if or.Op != OpAMD64ORQ { 26492 break 26493 } 26494 _ = or.Args[1] 26495 y := or.Args[0] 26496 s0 := or.Args[1] 26497 if s0.Op != OpAMD64SHLQconst { 26498 break 26499 } 26500 j0 := s0.AuxInt 26501 x0 := s0.Args[0] 26502 if x0.Op != OpAMD64MOVWloadidx1 { 26503 break 26504 } 26505 i0 := x0.AuxInt 26506 if x0.Aux != s { 26507 break 26508 } 26509 _ = x0.Args[2] 26510 if p != x0.Args[0] { 26511 break 26512 } 26513 if idx != x0.Args[1] { 26514 break 26515 } 26516 if mem != x0.Args[2] { 26517 break 26518 } 26519 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26520 break 26521 } 26522 b = mergePoint(b, x0, x1) 26523 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26524 v.reset(OpCopy) 26525 v.AddArg(v0) 26526 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26527 v1.AuxInt = j0 26528 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26529 v2.AuxInt = i0 26530 v2.Aux = s 26531 v2.AddArg(p) 26532 v2.AddArg(idx) 26533 v2.AddArg(mem) 26534 v1.AddArg(v2) 26535 v0.AddArg(v1) 26536 v0.AddArg(y) 26537 return true 26538 } 26539 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 26540 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26541 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26542 for { 26543 _ = v.Args[1] 26544 s1 := v.Args[0] 26545 if s1.Op != OpAMD64SHLQconst { 26546 break 26547 } 26548 j1 := s1.AuxInt 26549 x1 := s1.Args[0] 26550 if x1.Op != OpAMD64MOVWloadidx1 { 26551 break 26552 } 26553 i1 := x1.AuxInt 26554 s := x1.Aux 26555 _ = x1.Args[2] 26556 p := x1.Args[0] 26557 idx := x1.Args[1] 26558 mem := x1.Args[2] 26559 or := v.Args[1] 26560 if or.Op != OpAMD64ORQ { 26561 break 26562 } 26563 _ = or.Args[1] 26564 y := or.Args[0] 26565 s0 := or.Args[1] 26566 if 
s0.Op != OpAMD64SHLQconst { 26567 break 26568 } 26569 j0 := s0.AuxInt 26570 x0 := s0.Args[0] 26571 if x0.Op != OpAMD64MOVWloadidx1 { 26572 break 26573 } 26574 i0 := x0.AuxInt 26575 if x0.Aux != s { 26576 break 26577 } 26578 _ = x0.Args[2] 26579 if idx != x0.Args[0] { 26580 break 26581 } 26582 if p != x0.Args[1] { 26583 break 26584 } 26585 if mem != x0.Args[2] { 26586 break 26587 } 26588 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26589 break 26590 } 26591 b = mergePoint(b, x0, x1) 26592 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26593 v.reset(OpCopy) 26594 v.AddArg(v0) 26595 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26596 v1.AuxInt = j0 26597 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26598 v2.AuxInt = i0 26599 v2.Aux = s 26600 v2.AddArg(p) 26601 v2.AddArg(idx) 26602 v2.AddArg(mem) 26603 v1.AddArg(v2) 26604 v0.AddArg(v1) 26605 v0.AddArg(y) 26606 return true 26607 } 26608 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 26609 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26610 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26611 for { 26612 _ = v.Args[1] 26613 s1 := v.Args[0] 26614 if s1.Op != OpAMD64SHLQconst { 26615 break 26616 } 26617 j1 := s1.AuxInt 26618 x1 := s1.Args[0] 26619 if x1.Op != OpAMD64MOVWloadidx1 { 26620 break 26621 } 26622 i1 := x1.AuxInt 26623 s := x1.Aux 26624 _ = x1.Args[2] 26625 idx := x1.Args[0] 26626 p := x1.Args[1] 26627 mem := x1.Args[2] 26628 or := v.Args[1] 26629 if or.Op != OpAMD64ORQ { 26630 break 26631 } 26632 _ = or.Args[1] 26633 y := or.Args[0] 26634 s0 := or.Args[1] 26635 if s0.Op != OpAMD64SHLQconst { 26636 break 26637 } 26638 j0 := s0.AuxInt 26639 x0 := s0.Args[0] 26640 if x0.Op != OpAMD64MOVWloadidx1 { 26641 break 26642 } 26643 i0 := x0.AuxInt 26644 if x0.Aux != s { 26645 break 26646 } 26647 _ = x0.Args[2] 26648 if idx != x0.Args[0] { 26649 break 26650 } 26651 if p != x0.Args[1] { 26652 break 26653 } 26654 if mem != x0.Args[2] { 26655 break 26656 } 26657 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26658 break 26659 } 26660 b = mergePoint(b, x0, x1) 26661 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26662 v.reset(OpCopy) 26663 v.AddArg(v0) 26664 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26665 v1.AuxInt = j0 26666 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26667 v2.AuxInt = i0 26668 v2.Aux = s 26669 v2.AddArg(p) 26670 v2.AddArg(idx) 26671 v2.AddArg(mem) 26672 v1.AddArg(v2) 26673 v0.AddArg(v1) 26674 v0.AddArg(y) 26675 return true 26676 } 26677 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26678 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && 
clobber(s1) && clobber(or) 26679 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26680 for { 26681 _ = v.Args[1] 26682 or := v.Args[0] 26683 if or.Op != OpAMD64ORQ { 26684 break 26685 } 26686 _ = or.Args[1] 26687 s0 := or.Args[0] 26688 if s0.Op != OpAMD64SHLQconst { 26689 break 26690 } 26691 j0 := s0.AuxInt 26692 x0 := s0.Args[0] 26693 if x0.Op != OpAMD64MOVWloadidx1 { 26694 break 26695 } 26696 i0 := x0.AuxInt 26697 s := x0.Aux 26698 _ = x0.Args[2] 26699 p := x0.Args[0] 26700 idx := x0.Args[1] 26701 mem := x0.Args[2] 26702 y := or.Args[1] 26703 s1 := v.Args[1] 26704 if s1.Op != OpAMD64SHLQconst { 26705 break 26706 } 26707 j1 := s1.AuxInt 26708 x1 := s1.Args[0] 26709 if x1.Op != OpAMD64MOVWloadidx1 { 26710 break 26711 } 26712 i1 := x1.AuxInt 26713 if x1.Aux != s { 26714 break 26715 } 26716 _ = x1.Args[2] 26717 if p != x1.Args[0] { 26718 break 26719 } 26720 if idx != x1.Args[1] { 26721 break 26722 } 26723 if mem != x1.Args[2] { 26724 break 26725 } 26726 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26727 break 26728 } 26729 b = mergePoint(b, x0, x1) 26730 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26731 v.reset(OpCopy) 26732 v.AddArg(v0) 26733 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26734 v1.AuxInt = j0 26735 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26736 v2.AuxInt = i0 26737 v2.Aux = s 26738 v2.AddArg(p) 26739 v2.AddArg(idx) 26740 v2.AddArg(mem) 26741 v1.AddArg(v2) 26742 v0.AddArg(v1) 26743 v0.AddArg(y) 26744 return true 26745 } 26746 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26747 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26748 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26749 for { 26750 _ = v.Args[1] 26751 or := v.Args[0] 26752 if or.Op != OpAMD64ORQ { 26753 break 26754 } 26755 _ = or.Args[1] 26756 s0 := or.Args[0] 26757 if s0.Op != OpAMD64SHLQconst { 26758 break 26759 } 26760 j0 := s0.AuxInt 26761 x0 := s0.Args[0] 26762 if x0.Op != OpAMD64MOVWloadidx1 { 26763 break 26764 } 26765 i0 := x0.AuxInt 26766 s := x0.Aux 26767 _ = x0.Args[2] 26768 idx := x0.Args[0] 26769 p := x0.Args[1] 26770 mem := x0.Args[2] 26771 y := or.Args[1] 26772 s1 := v.Args[1] 26773 if s1.Op != OpAMD64SHLQconst { 26774 break 26775 } 26776 j1 := s1.AuxInt 26777 x1 := s1.Args[0] 26778 if x1.Op != OpAMD64MOVWloadidx1 { 26779 break 26780 } 26781 i1 := x1.AuxInt 26782 if x1.Aux != s { 26783 break 26784 } 26785 _ = x1.Args[2] 26786 if p != x1.Args[0] { 26787 break 26788 } 26789 if idx != x1.Args[1] { 26790 break 26791 } 26792 if mem != x1.Args[2] { 26793 break 26794 } 26795 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26796 break 26797 } 26798 b = mergePoint(b, x0, x1) 26799 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26800 v.reset(OpCopy) 26801 v.AddArg(v0) 26802 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, 
v.Type) 26803 v1.AuxInt = j0 26804 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26805 v2.AuxInt = i0 26806 v2.Aux = s 26807 v2.AddArg(p) 26808 v2.AddArg(idx) 26809 v2.AddArg(mem) 26810 v1.AddArg(v2) 26811 v0.AddArg(v1) 26812 v0.AddArg(y) 26813 return true 26814 } 26815 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26816 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26817 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26818 for { 26819 _ = v.Args[1] 26820 or := v.Args[0] 26821 if or.Op != OpAMD64ORQ { 26822 break 26823 } 26824 _ = or.Args[1] 26825 y := or.Args[0] 26826 s0 := or.Args[1] 26827 if s0.Op != OpAMD64SHLQconst { 26828 break 26829 } 26830 j0 := s0.AuxInt 26831 x0 := s0.Args[0] 26832 if x0.Op != OpAMD64MOVWloadidx1 { 26833 break 26834 } 26835 i0 := x0.AuxInt 26836 s := x0.Aux 26837 _ = x0.Args[2] 26838 p := x0.Args[0] 26839 idx := x0.Args[1] 26840 mem := x0.Args[2] 26841 s1 := v.Args[1] 26842 if s1.Op != OpAMD64SHLQconst { 26843 break 26844 } 26845 j1 := s1.AuxInt 26846 x1 := s1.Args[0] 26847 if x1.Op != OpAMD64MOVWloadidx1 { 26848 break 26849 } 26850 i1 := x1.AuxInt 26851 if x1.Aux != s { 26852 break 26853 } 26854 _ = x1.Args[2] 26855 if p != x1.Args[0] { 26856 break 26857 } 26858 if idx != x1.Args[1] { 26859 break 26860 } 26861 if mem != x1.Args[2] { 26862 break 26863 } 26864 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26865 break 26866 } 26867 b = mergePoint(b, x0, x1) 26868 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26869 v.reset(OpCopy) 26870 v.AddArg(v0) 26871 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26872 v1.AuxInt = j0 26873 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26874 v2.AuxInt = i0 26875 v2.Aux = s 26876 v2.AddArg(p) 26877 v2.AddArg(idx) 26878 v2.AddArg(mem) 26879 v1.AddArg(v2) 26880 v0.AddArg(v1) 26881 v0.AddArg(y) 26882 return true 26883 } 26884 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26885 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26886 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26887 for { 26888 _ = v.Args[1] 26889 or := v.Args[0] 26890 if or.Op != OpAMD64ORQ { 26891 break 26892 } 26893 _ = or.Args[1] 26894 y := or.Args[0] 26895 s0 := or.Args[1] 26896 if s0.Op != OpAMD64SHLQconst { 26897 break 26898 } 26899 j0 := s0.AuxInt 26900 x0 := s0.Args[0] 26901 if x0.Op != OpAMD64MOVWloadidx1 { 26902 break 26903 } 26904 i0 := x0.AuxInt 26905 s := x0.Aux 26906 _ = x0.Args[2] 26907 idx := x0.Args[0] 26908 p := x0.Args[1] 26909 mem := x0.Args[2] 26910 s1 := v.Args[1] 26911 if s1.Op != OpAMD64SHLQconst { 26912 break 26913 } 26914 j1 := s1.AuxInt 26915 x1 := s1.Args[0] 26916 if x1.Op != OpAMD64MOVWloadidx1 { 26917 break 26918 } 26919 i1 := x1.AuxInt 26920 if 
x1.Aux != s { 26921 break 26922 } 26923 _ = x1.Args[2] 26924 if p != x1.Args[0] { 26925 break 26926 } 26927 if idx != x1.Args[1] { 26928 break 26929 } 26930 if mem != x1.Args[2] { 26931 break 26932 } 26933 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26934 break 26935 } 26936 b = mergePoint(b, x0, x1) 26937 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26938 v.reset(OpCopy) 26939 v.AddArg(v0) 26940 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26941 v1.AuxInt = j0 26942 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26943 v2.AuxInt = i0 26944 v2.Aux = s 26945 v2.AddArg(p) 26946 v2.AddArg(idx) 26947 v2.AddArg(mem) 26948 v1.AddArg(v2) 26949 v0.AddArg(v1) 26950 v0.AddArg(y) 26951 return true 26952 } 26953 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 26954 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26955 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26956 for { 26957 _ = v.Args[1] 26958 or := v.Args[0] 26959 if or.Op != OpAMD64ORQ { 26960 break 26961 } 26962 _ = or.Args[1] 26963 s0 := or.Args[0] 26964 if s0.Op != OpAMD64SHLQconst { 26965 break 26966 } 26967 j0 := s0.AuxInt 26968 x0 := s0.Args[0] 26969 if x0.Op != OpAMD64MOVWloadidx1 { 26970 break 26971 } 26972 i0 := x0.AuxInt 26973 s := x0.Aux 26974 _ = x0.Args[2] 26975 p := x0.Args[0] 26976 idx := x0.Args[1] 26977 mem := x0.Args[2] 26978 y := or.Args[1] 26979 s1 := v.Args[1] 26980 if s1.Op != OpAMD64SHLQconst { 26981 break 26982 } 26983 j1 := s1.AuxInt 26984 x1 := s1.Args[0] 26985 if x1.Op != OpAMD64MOVWloadidx1 { 26986 break 26987 } 26988 i1 := x1.AuxInt 26989 if x1.Aux != s { 26990 break 26991 } 26992 _ = x1.Args[2] 26993 if idx != x1.Args[0] { 26994 break 26995 } 26996 if p != x1.Args[1] { 26997 break 26998 } 26999 if mem != x1.Args[2] { 27000 break 27001 } 27002 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27003 break 27004 } 27005 b = mergePoint(b, x0, x1) 27006 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27007 v.reset(OpCopy) 27008 v.AddArg(v0) 27009 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27010 v1.AuxInt = j0 27011 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27012 v2.AuxInt = i0 27013 v2.Aux = s 27014 v2.AddArg(p) 27015 v2.AddArg(idx) 27016 v2.AddArg(mem) 27017 v1.AddArg(v2) 27018 v0.AddArg(v1) 27019 v0.AddArg(y) 27020 return true 27021 } 27022 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27023 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27024 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27025 for { 27026 _ = v.Args[1] 27027 or := 
v.Args[0] 27028 if or.Op != OpAMD64ORQ { 27029 break 27030 } 27031 _ = or.Args[1] 27032 s0 := or.Args[0] 27033 if s0.Op != OpAMD64SHLQconst { 27034 break 27035 } 27036 j0 := s0.AuxInt 27037 x0 := s0.Args[0] 27038 if x0.Op != OpAMD64MOVWloadidx1 { 27039 break 27040 } 27041 i0 := x0.AuxInt 27042 s := x0.Aux 27043 _ = x0.Args[2] 27044 idx := x0.Args[0] 27045 p := x0.Args[1] 27046 mem := x0.Args[2] 27047 y := or.Args[1] 27048 s1 := v.Args[1] 27049 if s1.Op != OpAMD64SHLQconst { 27050 break 27051 } 27052 j1 := s1.AuxInt 27053 x1 := s1.Args[0] 27054 if x1.Op != OpAMD64MOVWloadidx1 { 27055 break 27056 } 27057 i1 := x1.AuxInt 27058 if x1.Aux != s { 27059 break 27060 } 27061 _ = x1.Args[2] 27062 if idx != x1.Args[0] { 27063 break 27064 } 27065 if p != x1.Args[1] { 27066 break 27067 } 27068 if mem != x1.Args[2] { 27069 break 27070 } 27071 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27072 break 27073 } 27074 b = mergePoint(b, x0, x1) 27075 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27076 v.reset(OpCopy) 27077 v.AddArg(v0) 27078 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27079 v1.AuxInt = j0 27080 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27081 v2.AuxInt = i0 27082 v2.Aux = s 27083 v2.AddArg(p) 27084 v2.AddArg(idx) 27085 v2.AddArg(mem) 27086 v1.AddArg(v2) 27087 v0.AddArg(v1) 27088 v0.AddArg(y) 27089 return true 27090 } 27091 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27092 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27093 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27094 for { 27095 _ = v.Args[1] 27096 or := v.Args[0] 27097 if or.Op != OpAMD64ORQ { 27098 break 27099 } 27100 _ = or.Args[1] 27101 y := or.Args[0] 27102 s0 := or.Args[1] 27103 if s0.Op != OpAMD64SHLQconst { 27104 break 27105 } 27106 j0 := s0.AuxInt 27107 x0 := s0.Args[0] 27108 if x0.Op != OpAMD64MOVWloadidx1 { 27109 break 27110 } 27111 i0 := x0.AuxInt 27112 s := x0.Aux 27113 _ = x0.Args[2] 27114 p := x0.Args[0] 27115 idx := x0.Args[1] 27116 mem := x0.Args[2] 27117 s1 := v.Args[1] 27118 if s1.Op != OpAMD64SHLQconst { 27119 break 27120 } 27121 j1 := s1.AuxInt 27122 x1 := s1.Args[0] 27123 if x1.Op != OpAMD64MOVWloadidx1 { 27124 break 27125 } 27126 i1 := x1.AuxInt 27127 if x1.Aux != s { 27128 break 27129 } 27130 _ = x1.Args[2] 27131 if idx != x1.Args[0] { 27132 break 27133 } 27134 if p != x1.Args[1] { 27135 break 27136 } 27137 if mem != x1.Args[2] { 27138 break 27139 } 27140 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27141 break 27142 } 27143 b = mergePoint(b, x0, x1) 27144 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27145 v.reset(OpCopy) 27146 v.AddArg(v0) 27147 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27148 v1.AuxInt = j0 27149 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27150 v2.AuxInt = i0 27151 v2.Aux = s 27152 v2.AddArg(p) 27153 v2.AddArg(idx) 27154 v2.AddArg(mem) 
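// The replacement tree here is assembled bottom-up: v2 is the widened
// 32-bit load MOVLloadidx1 [i0] {s} p idx mem, covering the same four
// bytes as the two adjacent 16-bit loads at offsets i0 and i0+2; v1 shifts
// it left by j0, the smaller of the two original shift amounts; and v0 ORs
// the shifted load back with the unrelated operand y. v is reset to an
// OpCopy of v0 rather than rebuilt in place because the new load must live
// at mergePoint(b, x0, x1), which may be a different block than v's own.
// As a sketch with j0 = 0 (illustrative helpers, not from this file), the
// matched expression
//
//	(uint64(load16(p+idx+i0+2)) << 16) | uint64(load16(p+idx+i0)) | y
//
// becomes
//
//	uint64(load32(p+idx+i0)) | y
//
// which is an identity on AMD64 because memory is little-endian.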
27155 v1.AddArg(v2) 27156 v0.AddArg(v1) 27157 v0.AddArg(y) 27158 return true 27159 } 27160 return false 27161 } 27162 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { 27163 b := v.Block 27164 _ = b 27165 typ := &b.Func.Config.Types 27166 _ = typ 27167 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27168 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27169 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27170 for { 27171 _ = v.Args[1] 27172 or := v.Args[0] 27173 if or.Op != OpAMD64ORQ { 27174 break 27175 } 27176 _ = or.Args[1] 27177 y := or.Args[0] 27178 s0 := or.Args[1] 27179 if s0.Op != OpAMD64SHLQconst { 27180 break 27181 } 27182 j0 := s0.AuxInt 27183 x0 := s0.Args[0] 27184 if x0.Op != OpAMD64MOVWloadidx1 { 27185 break 27186 } 27187 i0 := x0.AuxInt 27188 s := x0.Aux 27189 _ = x0.Args[2] 27190 idx := x0.Args[0] 27191 p := x0.Args[1] 27192 mem := x0.Args[2] 27193 s1 := v.Args[1] 27194 if s1.Op != OpAMD64SHLQconst { 27195 break 27196 } 27197 j1 := s1.AuxInt 27198 x1 := s1.Args[0] 27199 if x1.Op != OpAMD64MOVWloadidx1 { 27200 break 27201 } 27202 i1 := x1.AuxInt 27203 if x1.Aux != s { 27204 break 27205 } 27206 _ = x1.Args[2] 27207 if idx != x1.Args[0] { 27208 break 27209 } 27210 if p != x1.Args[1] { 27211 break 27212 } 27213 if mem != x1.Args[2] { 27214 break 27215 } 27216 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27217 break 27218 } 27219 b = mergePoint(b, x0, x1) 27220 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27221 v.reset(OpCopy) 27222 v.AddArg(v0) 27223 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27224 v1.AuxInt = j0 27225 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27226 v2.AuxInt = i0 27227 v2.Aux = s 27228 v2.AddArg(p) 27229 v2.AddArg(idx) 27230 v2.AddArg(mem) 27231 v1.AddArg(v2) 27232 v0.AddArg(v1) 27233 v0.AddArg(y) 27234 return true 27235 } 27236 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 27237 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27238 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 27239 for { 27240 _ = v.Args[1] 27241 x1 := v.Args[0] 27242 if x1.Op != OpAMD64MOVBload { 27243 break 27244 } 27245 i1 := x1.AuxInt 27246 s := x1.Aux 27247 _ = x1.Args[1] 27248 p := x1.Args[0] 27249 mem := x1.Args[1] 27250 sh := v.Args[1] 27251 if sh.Op != OpAMD64SHLQconst { 27252 break 27253 } 27254 if sh.AuxInt != 8 { 27255 break 27256 } 27257 x0 := sh.Args[0] 27258 if x0.Op != OpAMD64MOVBload { 27259 break 27260 } 27261 i0 := x0.AuxInt 27262 if x0.Aux != s { 27263 break 27264 } 27265 _ = x0.Args[1] 27266 if p != x0.Args[0] { 27267 break 27268 } 27269 if mem != x0.Args[1] { 27270 break 27271 } 27272 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27273 break 27274 } 27275 b = mergePoint(b, x0, x1) 27276 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27277 
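// This rule recognizes a big-endian style combine of two adjacent bytes:
// x0, the byte at the lower address i0, lands in the high half of
// (x0 << 8) | x1. A plain MOVWload at i0 is little-endian and yields
// x0 | (x1 << 8), the same two bytes in the opposite order, so the widened
// load v1 is wrapped in the ROLWconst [8] built as v0, which rotates the
// 16-bit value by one byte and restores the intended order. With concrete
// bytes (illustrative values only): if memory at p+i0 holds 0xAB, 0xCD,
// the matched expression computes 0xABCD, MOVWload yields 0xCDAB, and a
// 16-bit rotate by 8 turns that back into 0xABCD. The four- and eight-byte
// variants that follow use BSWAPL and BSWAPQ for the same purpose.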
v.reset(OpCopy) 27278 v.AddArg(v0) 27279 v0.AuxInt = 8 27280 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27281 v1.AuxInt = i0 27282 v1.Aux = s 27283 v1.AddArg(p) 27284 v1.AddArg(mem) 27285 v0.AddArg(v1) 27286 return true 27287 } 27288 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 27289 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27290 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 27291 for { 27292 _ = v.Args[1] 27293 sh := v.Args[0] 27294 if sh.Op != OpAMD64SHLQconst { 27295 break 27296 } 27297 if sh.AuxInt != 8 { 27298 break 27299 } 27300 x0 := sh.Args[0] 27301 if x0.Op != OpAMD64MOVBload { 27302 break 27303 } 27304 i0 := x0.AuxInt 27305 s := x0.Aux 27306 _ = x0.Args[1] 27307 p := x0.Args[0] 27308 mem := x0.Args[1] 27309 x1 := v.Args[1] 27310 if x1.Op != OpAMD64MOVBload { 27311 break 27312 } 27313 i1 := x1.AuxInt 27314 if x1.Aux != s { 27315 break 27316 } 27317 _ = x1.Args[1] 27318 if p != x1.Args[0] { 27319 break 27320 } 27321 if mem != x1.Args[1] { 27322 break 27323 } 27324 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27325 break 27326 } 27327 b = mergePoint(b, x0, x1) 27328 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27329 v.reset(OpCopy) 27330 v.AddArg(v0) 27331 v0.AuxInt = 8 27332 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27333 v1.AuxInt = i0 27334 v1.Aux = s 27335 v1.AddArg(p) 27336 v1.AddArg(mem) 27337 v0.AddArg(v1) 27338 return true 27339 } 27340 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 27341 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27342 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 27343 for { 27344 _ = v.Args[1] 27345 r1 := v.Args[0] 27346 if r1.Op != OpAMD64ROLWconst { 27347 break 27348 } 27349 if r1.AuxInt != 8 { 27350 break 27351 } 27352 x1 := r1.Args[0] 27353 if x1.Op != OpAMD64MOVWload { 27354 break 27355 } 27356 i1 := x1.AuxInt 27357 s := x1.Aux 27358 _ = x1.Args[1] 27359 p := x1.Args[0] 27360 mem := x1.Args[1] 27361 sh := v.Args[1] 27362 if sh.Op != OpAMD64SHLQconst { 27363 break 27364 } 27365 if sh.AuxInt != 16 { 27366 break 27367 } 27368 r0 := sh.Args[0] 27369 if r0.Op != OpAMD64ROLWconst { 27370 break 27371 } 27372 if r0.AuxInt != 8 { 27373 break 27374 } 27375 x0 := r0.Args[0] 27376 if x0.Op != OpAMD64MOVWload { 27377 break 27378 } 27379 i0 := x0.AuxInt 27380 if x0.Aux != s { 27381 break 27382 } 27383 _ = x0.Args[1] 27384 if p != x0.Args[0] { 27385 break 27386 } 27387 if mem != x0.Args[1] { 27388 break 27389 } 27390 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27391 break 27392 } 27393 b = mergePoint(b, x0, x1) 27394 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27395 v.reset(OpCopy) 27396 v.AddArg(v0) 27397 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27398 v1.AuxInt = i0 27399 v1.Aux = s 27400 v1.AddArg(p) 27401 v1.AddArg(mem) 27402 v0.AddArg(v1) 27403 return true 27404 } 27405 // match: (ORQ sh:(SHLQconst [16] 
r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 27406 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27407 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 27408 for { 27409 _ = v.Args[1] 27410 sh := v.Args[0] 27411 if sh.Op != OpAMD64SHLQconst { 27412 break 27413 } 27414 if sh.AuxInt != 16 { 27415 break 27416 } 27417 r0 := sh.Args[0] 27418 if r0.Op != OpAMD64ROLWconst { 27419 break 27420 } 27421 if r0.AuxInt != 8 { 27422 break 27423 } 27424 x0 := r0.Args[0] 27425 if x0.Op != OpAMD64MOVWload { 27426 break 27427 } 27428 i0 := x0.AuxInt 27429 s := x0.Aux 27430 _ = x0.Args[1] 27431 p := x0.Args[0] 27432 mem := x0.Args[1] 27433 r1 := v.Args[1] 27434 if r1.Op != OpAMD64ROLWconst { 27435 break 27436 } 27437 if r1.AuxInt != 8 { 27438 break 27439 } 27440 x1 := r1.Args[0] 27441 if x1.Op != OpAMD64MOVWload { 27442 break 27443 } 27444 i1 := x1.AuxInt 27445 if x1.Aux != s { 27446 break 27447 } 27448 _ = x1.Args[1] 27449 if p != x1.Args[0] { 27450 break 27451 } 27452 if mem != x1.Args[1] { 27453 break 27454 } 27455 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27456 break 27457 } 27458 b = mergePoint(b, x0, x1) 27459 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27460 v.reset(OpCopy) 27461 v.AddArg(v0) 27462 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27463 v1.AuxInt = i0 27464 v1.Aux = s 27465 v1.AddArg(p) 27466 v1.AddArg(mem) 27467 v0.AddArg(v1) 27468 return true 27469 } 27470 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 27471 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27472 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 27473 for { 27474 _ = v.Args[1] 27475 r1 := v.Args[0] 27476 if r1.Op != OpAMD64BSWAPL { 27477 break 27478 } 27479 x1 := r1.Args[0] 27480 if x1.Op != OpAMD64MOVLload { 27481 break 27482 } 27483 i1 := x1.AuxInt 27484 s := x1.Aux 27485 _ = x1.Args[1] 27486 p := x1.Args[0] 27487 mem := x1.Args[1] 27488 sh := v.Args[1] 27489 if sh.Op != OpAMD64SHLQconst { 27490 break 27491 } 27492 if sh.AuxInt != 32 { 27493 break 27494 } 27495 r0 := sh.Args[0] 27496 if r0.Op != OpAMD64BSWAPL { 27497 break 27498 } 27499 x0 := r0.Args[0] 27500 if x0.Op != OpAMD64MOVLload { 27501 break 27502 } 27503 i0 := x0.AuxInt 27504 if x0.Aux != s { 27505 break 27506 } 27507 _ = x0.Args[1] 27508 if p != x0.Args[0] { 27509 break 27510 } 27511 if mem != x0.Args[1] { 27512 break 27513 } 27514 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27515 break 27516 } 27517 b = mergePoint(b, x0, x1) 27518 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 27519 v.reset(OpCopy) 27520 v.AddArg(v0) 27521 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 27522 v1.AuxInt = i0 27523 v1.Aux = s 27524 v1.AddArg(p) 27525 v1.AddArg(mem) 27526 v0.AddArg(v1) 27527 return true 27528 } 27529 // match: (ORQ sh:(SHLQconst [32] 
r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 27530 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27531 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 27532 for { 27533 _ = v.Args[1] 27534 sh := v.Args[0] 27535 if sh.Op != OpAMD64SHLQconst { 27536 break 27537 } 27538 if sh.AuxInt != 32 { 27539 break 27540 } 27541 r0 := sh.Args[0] 27542 if r0.Op != OpAMD64BSWAPL { 27543 break 27544 } 27545 x0 := r0.Args[0] 27546 if x0.Op != OpAMD64MOVLload { 27547 break 27548 } 27549 i0 := x0.AuxInt 27550 s := x0.Aux 27551 _ = x0.Args[1] 27552 p := x0.Args[0] 27553 mem := x0.Args[1] 27554 r1 := v.Args[1] 27555 if r1.Op != OpAMD64BSWAPL { 27556 break 27557 } 27558 x1 := r1.Args[0] 27559 if x1.Op != OpAMD64MOVLload { 27560 break 27561 } 27562 i1 := x1.AuxInt 27563 if x1.Aux != s { 27564 break 27565 } 27566 _ = x1.Args[1] 27567 if p != x1.Args[0] { 27568 break 27569 } 27570 if mem != x1.Args[1] { 27571 break 27572 } 27573 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27574 break 27575 } 27576 b = mergePoint(b, x0, x1) 27577 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 27578 v.reset(OpCopy) 27579 v.AddArg(v0) 27580 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 27581 v1.AuxInt = i0 27582 v1.Aux = s 27583 v1.AddArg(p) 27584 v1.AddArg(mem) 27585 v0.AddArg(v1) 27586 return true 27587 } 27588 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 27589 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27590 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27591 for { 27592 _ = v.Args[1] 27593 s0 := v.Args[0] 27594 if s0.Op != OpAMD64SHLQconst { 27595 break 27596 } 27597 j0 := s0.AuxInt 27598 x0 := s0.Args[0] 27599 if x0.Op != OpAMD64MOVBload { 27600 break 27601 } 27602 i0 := x0.AuxInt 27603 s := x0.Aux 27604 _ = x0.Args[1] 27605 p := x0.Args[0] 27606 mem := x0.Args[1] 27607 or := v.Args[1] 27608 if or.Op != OpAMD64ORQ { 27609 break 27610 } 27611 _ = or.Args[1] 27612 s1 := or.Args[0] 27613 if s1.Op != OpAMD64SHLQconst { 27614 break 27615 } 27616 j1 := s1.AuxInt 27617 x1 := s1.Args[0] 27618 if x1.Op != OpAMD64MOVBload { 27619 break 27620 } 27621 i1 := x1.AuxInt 27622 if x1.Aux != s { 27623 break 27624 } 27625 _ = x1.Args[1] 27626 if p != x1.Args[0] { 27627 break 27628 } 27629 if mem != x1.Args[1] { 27630 break 27631 } 27632 y := or.Args[1] 27633 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27634 break 27635 } 27636 b = mergePoint(b, x0, x1) 27637 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27638 v.reset(OpCopy) 27639 v.AddArg(v0) 27640 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27641 v1.AuxInt = j1 27642 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27643 v2.AuxInt = 8 27644 v3 := 
b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27645 v3.AuxInt = i0 27646 v3.Aux = s 27647 v3.AddArg(p) 27648 v3.AddArg(mem) 27649 v2.AddArg(v3) 27650 v1.AddArg(v2) 27651 v0.AddArg(v1) 27652 v0.AddArg(y) 27653 return true 27654 } 27655 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 27656 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27657 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27658 for { 27659 _ = v.Args[1] 27660 s0 := v.Args[0] 27661 if s0.Op != OpAMD64SHLQconst { 27662 break 27663 } 27664 j0 := s0.AuxInt 27665 x0 := s0.Args[0] 27666 if x0.Op != OpAMD64MOVBload { 27667 break 27668 } 27669 i0 := x0.AuxInt 27670 s := x0.Aux 27671 _ = x0.Args[1] 27672 p := x0.Args[0] 27673 mem := x0.Args[1] 27674 or := v.Args[1] 27675 if or.Op != OpAMD64ORQ { 27676 break 27677 } 27678 _ = or.Args[1] 27679 y := or.Args[0] 27680 s1 := or.Args[1] 27681 if s1.Op != OpAMD64SHLQconst { 27682 break 27683 } 27684 j1 := s1.AuxInt 27685 x1 := s1.Args[0] 27686 if x1.Op != OpAMD64MOVBload { 27687 break 27688 } 27689 i1 := x1.AuxInt 27690 if x1.Aux != s { 27691 break 27692 } 27693 _ = x1.Args[1] 27694 if p != x1.Args[0] { 27695 break 27696 } 27697 if mem != x1.Args[1] { 27698 break 27699 } 27700 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27701 break 27702 } 27703 b = mergePoint(b, x0, x1) 27704 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27705 v.reset(OpCopy) 27706 v.AddArg(v0) 27707 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27708 v1.AuxInt = j1 27709 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27710 v2.AuxInt = 8 27711 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27712 v3.AuxInt = i0 27713 v3.Aux = s 27714 v3.AddArg(p) 27715 v3.AddArg(mem) 27716 v2.AddArg(v3) 27717 v1.AddArg(v2) 27718 v0.AddArg(v1) 27719 v0.AddArg(y) 27720 return true 27721 } 27722 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 27723 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27724 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27725 for { 27726 _ = v.Args[1] 27727 or := v.Args[0] 27728 if or.Op != OpAMD64ORQ { 27729 break 27730 } 27731 _ = or.Args[1] 27732 s1 := or.Args[0] 27733 if s1.Op != OpAMD64SHLQconst { 27734 break 27735 } 27736 j1 := s1.AuxInt 27737 x1 := s1.Args[0] 27738 if x1.Op != OpAMD64MOVBload { 27739 break 27740 } 27741 i1 := x1.AuxInt 27742 s := x1.Aux 27743 _ = x1.Args[1] 27744 p := x1.Args[0] 27745 mem := x1.Args[1] 27746 y := or.Args[1] 27747 s0 := v.Args[1] 27748 if s0.Op != OpAMD64SHLQconst { 27749 break 27750 } 27751 j0 := s0.AuxInt 27752 x0 := s0.Args[0] 27753 if x0.Op != OpAMD64MOVBload { 27754 break 27755 } 27756 i0 := x0.AuxInt 27757 if x0.Aux != s { 27758 break 27759 } 27760 _ = x0.Args[1] 27761 if p != 
x0.Args[0] { 27762 break 27763 } 27764 if mem != x0.Args[1] { 27765 break 27766 } 27767 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27768 break 27769 } 27770 b = mergePoint(b, x0, x1) 27771 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27772 v.reset(OpCopy) 27773 v.AddArg(v0) 27774 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27775 v1.AuxInt = j1 27776 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27777 v2.AuxInt = 8 27778 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27779 v3.AuxInt = i0 27780 v3.Aux = s 27781 v3.AddArg(p) 27782 v3.AddArg(mem) 27783 v2.AddArg(v3) 27784 v1.AddArg(v2) 27785 v0.AddArg(v1) 27786 v0.AddArg(y) 27787 return true 27788 } 27789 return false 27790 } 27791 func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { 27792 b := v.Block 27793 _ = b 27794 typ := &b.Func.Config.Types 27795 _ = typ 27796 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 27797 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27798 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27799 for { 27800 _ = v.Args[1] 27801 or := v.Args[0] 27802 if or.Op != OpAMD64ORQ { 27803 break 27804 } 27805 _ = or.Args[1] 27806 y := or.Args[0] 27807 s1 := or.Args[1] 27808 if s1.Op != OpAMD64SHLQconst { 27809 break 27810 } 27811 j1 := s1.AuxInt 27812 x1 := s1.Args[0] 27813 if x1.Op != OpAMD64MOVBload { 27814 break 27815 } 27816 i1 := x1.AuxInt 27817 s := x1.Aux 27818 _ = x1.Args[1] 27819 p := x1.Args[0] 27820 mem := x1.Args[1] 27821 s0 := v.Args[1] 27822 if s0.Op != OpAMD64SHLQconst { 27823 break 27824 } 27825 j0 := s0.AuxInt 27826 x0 := s0.Args[0] 27827 if x0.Op != OpAMD64MOVBload { 27828 break 27829 } 27830 i0 := x0.AuxInt 27831 if x0.Aux != s { 27832 break 27833 } 27834 _ = x0.Args[1] 27835 if p != x0.Args[0] { 27836 break 27837 } 27838 if mem != x0.Args[1] { 27839 break 27840 } 27841 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27842 break 27843 } 27844 b = mergePoint(b, x0, x1) 27845 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27846 v.reset(OpCopy) 27847 v.AddArg(v0) 27848 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27849 v1.AuxInt = j1 27850 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27851 v2.AuxInt = 8 27852 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27853 v3.AuxInt = i0 27854 v3.Aux = s 27855 v3.AddArg(p) 27856 v3.AddArg(mem) 27857 v2.AddArg(v3) 27858 v1.AddArg(v2) 27859 v0.AddArg(v1) 27860 v0.AddArg(y) 27861 return true 27862 } 27863 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 27864 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) 
&& clobber(s0) && clobber(s1) && clobber(or) 27865 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 27866 for { 27867 _ = v.Args[1] 27868 s0 := v.Args[0] 27869 if s0.Op != OpAMD64SHLQconst { 27870 break 27871 } 27872 j0 := s0.AuxInt 27873 r0 := s0.Args[0] 27874 if r0.Op != OpAMD64ROLWconst { 27875 break 27876 } 27877 if r0.AuxInt != 8 { 27878 break 27879 } 27880 x0 := r0.Args[0] 27881 if x0.Op != OpAMD64MOVWload { 27882 break 27883 } 27884 i0 := x0.AuxInt 27885 s := x0.Aux 27886 _ = x0.Args[1] 27887 p := x0.Args[0] 27888 mem := x0.Args[1] 27889 or := v.Args[1] 27890 if or.Op != OpAMD64ORQ { 27891 break 27892 } 27893 _ = or.Args[1] 27894 s1 := or.Args[0] 27895 if s1.Op != OpAMD64SHLQconst { 27896 break 27897 } 27898 j1 := s1.AuxInt 27899 r1 := s1.Args[0] 27900 if r1.Op != OpAMD64ROLWconst { 27901 break 27902 } 27903 if r1.AuxInt != 8 { 27904 break 27905 } 27906 x1 := r1.Args[0] 27907 if x1.Op != OpAMD64MOVWload { 27908 break 27909 } 27910 i1 := x1.AuxInt 27911 if x1.Aux != s { 27912 break 27913 } 27914 _ = x1.Args[1] 27915 if p != x1.Args[0] { 27916 break 27917 } 27918 if mem != x1.Args[1] { 27919 break 27920 } 27921 y := or.Args[1] 27922 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 27923 break 27924 } 27925 b = mergePoint(b, x0, x1) 27926 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27927 v.reset(OpCopy) 27928 v.AddArg(v0) 27929 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27930 v1.AuxInt = j1 27931 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 27932 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27933 v3.AuxInt = i0 27934 v3.Aux = s 27935 v3.AddArg(p) 27936 v3.AddArg(mem) 27937 v2.AddArg(v3) 27938 v1.AddArg(v2) 27939 v0.AddArg(v1) 27940 v0.AddArg(y) 27941 return true 27942 } 27943 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 27944 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 27945 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 27946 for { 27947 _ = v.Args[1] 27948 s0 := v.Args[0] 27949 if s0.Op != OpAMD64SHLQconst { 27950 break 27951 } 27952 j0 := s0.AuxInt 27953 r0 := s0.Args[0] 27954 if r0.Op != OpAMD64ROLWconst { 27955 break 27956 } 27957 if r0.AuxInt != 8 { 27958 break 27959 } 27960 x0 := r0.Args[0] 27961 if x0.Op != OpAMD64MOVWload { 27962 break 27963 } 27964 i0 := x0.AuxInt 27965 s := x0.Aux 27966 _ = x0.Args[1] 27967 p := x0.Args[0] 27968 mem := x0.Args[1] 27969 or := v.Args[1] 27970 if or.Op != OpAMD64ORQ { 27971 break 27972 } 27973 _ = or.Args[1] 27974 y := or.Args[0] 27975 s1 := or.Args[1] 27976 if s1.Op != OpAMD64SHLQconst { 27977 break 27978 } 27979 j1 := s1.AuxInt 27980 r1 := s1.Args[0] 27981 if r1.Op != OpAMD64ROLWconst { 27982 break 27983 } 27984 if r1.AuxInt != 8 { 27985 break 27986 } 27987 x1 := r1.Args[0] 27988 if x1.Op != OpAMD64MOVWload { 27989 break 27990 } 27991 i1 := x1.AuxInt 27992 if 
x1.Aux != s { 27993 break 27994 } 27995 _ = x1.Args[1] 27996 if p != x1.Args[0] { 27997 break 27998 } 27999 if mem != x1.Args[1] { 28000 break 28001 } 28002 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 28003 break 28004 } 28005 b = mergePoint(b, x0, x1) 28006 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28007 v.reset(OpCopy) 28008 v.AddArg(v0) 28009 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28010 v1.AuxInt = j1 28011 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 28012 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 28013 v3.AuxInt = i0 28014 v3.Aux = s 28015 v3.AddArg(p) 28016 v3.AddArg(mem) 28017 v2.AddArg(v3) 28018 v1.AddArg(v2) 28019 v0.AddArg(v1) 28020 v0.AddArg(y) 28021 return true 28022 } 28023 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 28024 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 28025 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 28026 for { 28027 _ = v.Args[1] 28028 or := v.Args[0] 28029 if or.Op != OpAMD64ORQ { 28030 break 28031 } 28032 _ = or.Args[1] 28033 s1 := or.Args[0] 28034 if s1.Op != OpAMD64SHLQconst { 28035 break 28036 } 28037 j1 := s1.AuxInt 28038 r1 := s1.Args[0] 28039 if r1.Op != OpAMD64ROLWconst { 28040 break 28041 } 28042 if r1.AuxInt != 8 { 28043 break 28044 } 28045 x1 := r1.Args[0] 28046 if x1.Op != OpAMD64MOVWload { 28047 break 28048 } 28049 i1 := x1.AuxInt 28050 s := x1.Aux 28051 _ = x1.Args[1] 28052 p := x1.Args[0] 28053 mem := x1.Args[1] 28054 y := or.Args[1] 28055 s0 := v.Args[1] 28056 if s0.Op != OpAMD64SHLQconst { 28057 break 28058 } 28059 j0 := s0.AuxInt 28060 r0 := s0.Args[0] 28061 if r0.Op != OpAMD64ROLWconst { 28062 break 28063 } 28064 if r0.AuxInt != 8 { 28065 break 28066 } 28067 x0 := r0.Args[0] 28068 if x0.Op != OpAMD64MOVWload { 28069 break 28070 } 28071 i0 := x0.AuxInt 28072 if x0.Aux != s { 28073 break 28074 } 28075 _ = x0.Args[1] 28076 if p != x0.Args[0] { 28077 break 28078 } 28079 if mem != x0.Args[1] { 28080 break 28081 } 28082 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 28083 break 28084 } 28085 b = mergePoint(b, x0, x1) 28086 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28087 v.reset(OpCopy) 28088 v.AddArg(v0) 28089 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28090 v1.AuxInt = j1 28091 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 28092 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 28093 v3.AuxInt = i0 28094 v3.Aux = s 28095 v3.AddArg(p) 28096 v3.AddArg(mem) 28097 v2.AddArg(v3) 28098 v1.AddArg(v2) 28099 v0.AddArg(v1) 28100 v0.AddArg(y) 28101 return true 28102 } 28103 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) 
s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 28104 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 28105 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 28106 for { 28107 _ = v.Args[1] 28108 or := v.Args[0] 28109 if or.Op != OpAMD64ORQ { 28110 break 28111 } 28112 _ = or.Args[1] 28113 y := or.Args[0] 28114 s1 := or.Args[1] 28115 if s1.Op != OpAMD64SHLQconst { 28116 break 28117 } 28118 j1 := s1.AuxInt 28119 r1 := s1.Args[0] 28120 if r1.Op != OpAMD64ROLWconst { 28121 break 28122 } 28123 if r1.AuxInt != 8 { 28124 break 28125 } 28126 x1 := r1.Args[0] 28127 if x1.Op != OpAMD64MOVWload { 28128 break 28129 } 28130 i1 := x1.AuxInt 28131 s := x1.Aux 28132 _ = x1.Args[1] 28133 p := x1.Args[0] 28134 mem := x1.Args[1] 28135 s0 := v.Args[1] 28136 if s0.Op != OpAMD64SHLQconst { 28137 break 28138 } 28139 j0 := s0.AuxInt 28140 r0 := s0.Args[0] 28141 if r0.Op != OpAMD64ROLWconst { 28142 break 28143 } 28144 if r0.AuxInt != 8 { 28145 break 28146 } 28147 x0 := r0.Args[0] 28148 if x0.Op != OpAMD64MOVWload { 28149 break 28150 } 28151 i0 := x0.AuxInt 28152 if x0.Aux != s { 28153 break 28154 } 28155 _ = x0.Args[1] 28156 if p != x0.Args[0] { 28157 break 28158 } 28159 if mem != x0.Args[1] { 28160 break 28161 } 28162 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 28163 break 28164 } 28165 b = mergePoint(b, x0, x1) 28166 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28167 v.reset(OpCopy) 28168 v.AddArg(v0) 28169 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28170 v1.AuxInt = j1 28171 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 28172 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 28173 v3.AuxInt = i0 28174 v3.Aux = s 28175 v3.AddArg(p) 28176 v3.AddArg(mem) 28177 v2.AddArg(v3) 28178 v1.AddArg(v2) 28179 v0.AddArg(v1) 28180 v0.AddArg(y) 28181 return true 28182 } 28183 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 28184 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28185 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28186 for { 28187 _ = v.Args[1] 28188 x1 := v.Args[0] 28189 if x1.Op != OpAMD64MOVBloadidx1 { 28190 break 28191 } 28192 i1 := x1.AuxInt 28193 s := x1.Aux 28194 _ = x1.Args[2] 28195 p := x1.Args[0] 28196 idx := x1.Args[1] 28197 mem := x1.Args[2] 28198 sh := v.Args[1] 28199 if sh.Op != OpAMD64SHLQconst { 28200 break 28201 } 28202 if sh.AuxInt != 8 { 28203 break 28204 } 28205 x0 := sh.Args[0] 28206 if x0.Op != OpAMD64MOVBloadidx1 { 28207 break 28208 } 28209 i0 := x0.AuxInt 28210 if x0.Aux != s { 28211 break 28212 } 28213 _ = x0.Args[2] 28214 if p != x0.Args[0] { 28215 break 28216 } 28217 if idx != x0.Args[1] { 28218 break 28219 } 28220 if mem != x0.Args[2] { 28221 break 28222 } 28223 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) 
&& clobber(sh)) { 28224 break 28225 } 28226 b = mergePoint(b, x0, x1) 28227 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28228 v.reset(OpCopy) 28229 v.AddArg(v0) 28230 v0.AuxInt = 8 28231 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28232 v1.AuxInt = i0 28233 v1.Aux = s 28234 v1.AddArg(p) 28235 v1.AddArg(idx) 28236 v1.AddArg(mem) 28237 v0.AddArg(v1) 28238 return true 28239 } 28240 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 28241 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28242 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28243 for { 28244 _ = v.Args[1] 28245 x1 := v.Args[0] 28246 if x1.Op != OpAMD64MOVBloadidx1 { 28247 break 28248 } 28249 i1 := x1.AuxInt 28250 s := x1.Aux 28251 _ = x1.Args[2] 28252 idx := x1.Args[0] 28253 p := x1.Args[1] 28254 mem := x1.Args[2] 28255 sh := v.Args[1] 28256 if sh.Op != OpAMD64SHLQconst { 28257 break 28258 } 28259 if sh.AuxInt != 8 { 28260 break 28261 } 28262 x0 := sh.Args[0] 28263 if x0.Op != OpAMD64MOVBloadidx1 { 28264 break 28265 } 28266 i0 := x0.AuxInt 28267 if x0.Aux != s { 28268 break 28269 } 28270 _ = x0.Args[2] 28271 if p != x0.Args[0] { 28272 break 28273 } 28274 if idx != x0.Args[1] { 28275 break 28276 } 28277 if mem != x0.Args[2] { 28278 break 28279 } 28280 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28281 break 28282 } 28283 b = mergePoint(b, x0, x1) 28284 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28285 v.reset(OpCopy) 28286 v.AddArg(v0) 28287 v0.AuxInt = 8 28288 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28289 v1.AuxInt = i0 28290 v1.Aux = s 28291 v1.AddArg(p) 28292 v1.AddArg(idx) 28293 v1.AddArg(mem) 28294 v0.AddArg(v1) 28295 return true 28296 } 28297 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 28298 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28299 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28300 for { 28301 _ = v.Args[1] 28302 x1 := v.Args[0] 28303 if x1.Op != OpAMD64MOVBloadidx1 { 28304 break 28305 } 28306 i1 := x1.AuxInt 28307 s := x1.Aux 28308 _ = x1.Args[2] 28309 p := x1.Args[0] 28310 idx := x1.Args[1] 28311 mem := x1.Args[2] 28312 sh := v.Args[1] 28313 if sh.Op != OpAMD64SHLQconst { 28314 break 28315 } 28316 if sh.AuxInt != 8 { 28317 break 28318 } 28319 x0 := sh.Args[0] 28320 if x0.Op != OpAMD64MOVBloadidx1 { 28321 break 28322 } 28323 i0 := x0.AuxInt 28324 if x0.Aux != s { 28325 break 28326 } 28327 _ = x0.Args[2] 28328 if idx != x0.Args[0] { 28329 break 28330 } 28331 if p != x0.Args[1] { 28332 break 28333 } 28334 if mem != x0.Args[2] { 28335 break 28336 } 28337 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28338 break 28339 } 28340 b = mergePoint(b, x0, x1) 28341 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28342 v.reset(OpCopy) 28343 v.AddArg(v0) 28344 v0.AuxInt = 8 28345 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28346 v1.AuxInt = i0 28347 v1.Aux = s 28348 v1.AddArg(p) 28349 v1.AddArg(idx) 28350 v1.AddArg(mem) 28351 v0.AddArg(v1) 28352 return true 28353 } 28354 // 
match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 28355 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28356 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28357 for { 28358 _ = v.Args[1] 28359 x1 := v.Args[0] 28360 if x1.Op != OpAMD64MOVBloadidx1 { 28361 break 28362 } 28363 i1 := x1.AuxInt 28364 s := x1.Aux 28365 _ = x1.Args[2] 28366 idx := x1.Args[0] 28367 p := x1.Args[1] 28368 mem := x1.Args[2] 28369 sh := v.Args[1] 28370 if sh.Op != OpAMD64SHLQconst { 28371 break 28372 } 28373 if sh.AuxInt != 8 { 28374 break 28375 } 28376 x0 := sh.Args[0] 28377 if x0.Op != OpAMD64MOVBloadidx1 { 28378 break 28379 } 28380 i0 := x0.AuxInt 28381 if x0.Aux != s { 28382 break 28383 } 28384 _ = x0.Args[2] 28385 if idx != x0.Args[0] { 28386 break 28387 } 28388 if p != x0.Args[1] { 28389 break 28390 } 28391 if mem != x0.Args[2] { 28392 break 28393 } 28394 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28395 break 28396 } 28397 b = mergePoint(b, x0, x1) 28398 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28399 v.reset(OpCopy) 28400 v.AddArg(v0) 28401 v0.AuxInt = 8 28402 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28403 v1.AuxInt = i0 28404 v1.Aux = s 28405 v1.AddArg(p) 28406 v1.AddArg(idx) 28407 v1.AddArg(mem) 28408 v0.AddArg(v1) 28409 return true 28410 } 28411 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 28412 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28413 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28414 for { 28415 _ = v.Args[1] 28416 sh := v.Args[0] 28417 if sh.Op != OpAMD64SHLQconst { 28418 break 28419 } 28420 if sh.AuxInt != 8 { 28421 break 28422 } 28423 x0 := sh.Args[0] 28424 if x0.Op != OpAMD64MOVBloadidx1 { 28425 break 28426 } 28427 i0 := x0.AuxInt 28428 s := x0.Aux 28429 _ = x0.Args[2] 28430 p := x0.Args[0] 28431 idx := x0.Args[1] 28432 mem := x0.Args[2] 28433 x1 := v.Args[1] 28434 if x1.Op != OpAMD64MOVBloadidx1 { 28435 break 28436 } 28437 i1 := x1.AuxInt 28438 if x1.Aux != s { 28439 break 28440 } 28441 _ = x1.Args[2] 28442 if p != x1.Args[0] { 28443 break 28444 } 28445 if idx != x1.Args[1] { 28446 break 28447 } 28448 if mem != x1.Args[2] { 28449 break 28450 } 28451 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28452 break 28453 } 28454 b = mergePoint(b, x0, x1) 28455 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28456 v.reset(OpCopy) 28457 v.AddArg(v0) 28458 v0.AuxInt = 8 28459 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28460 v1.AuxInt = i0 28461 v1.Aux = s 28462 v1.AddArg(p) 28463 v1.AddArg(idx) 28464 v1.AddArg(mem) 28465 v0.AddArg(v1) 28466 return true 28467 } 28468 return false 28469 } 28470 func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool { 28471 b := v.Block 28472 _ = b 28473 typ := &b.Func.Config.Types 28474 _ = typ 28475 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 28476 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) 
&& clobber(sh) 28477 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28478 for { 28479 _ = v.Args[1] 28480 sh := v.Args[0] 28481 if sh.Op != OpAMD64SHLQconst { 28482 break 28483 } 28484 if sh.AuxInt != 8 { 28485 break 28486 } 28487 x0 := sh.Args[0] 28488 if x0.Op != OpAMD64MOVBloadidx1 { 28489 break 28490 } 28491 i0 := x0.AuxInt 28492 s := x0.Aux 28493 _ = x0.Args[2] 28494 idx := x0.Args[0] 28495 p := x0.Args[1] 28496 mem := x0.Args[2] 28497 x1 := v.Args[1] 28498 if x1.Op != OpAMD64MOVBloadidx1 { 28499 break 28500 } 28501 i1 := x1.AuxInt 28502 if x1.Aux != s { 28503 break 28504 } 28505 _ = x1.Args[2] 28506 if p != x1.Args[0] { 28507 break 28508 } 28509 if idx != x1.Args[1] { 28510 break 28511 } 28512 if mem != x1.Args[2] { 28513 break 28514 } 28515 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28516 break 28517 } 28518 b = mergePoint(b, x0, x1) 28519 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28520 v.reset(OpCopy) 28521 v.AddArg(v0) 28522 v0.AuxInt = 8 28523 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28524 v1.AuxInt = i0 28525 v1.Aux = s 28526 v1.AddArg(p) 28527 v1.AddArg(idx) 28528 v1.AddArg(mem) 28529 v0.AddArg(v1) 28530 return true 28531 } 28532 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 28533 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28534 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28535 for { 28536 _ = v.Args[1] 28537 sh := v.Args[0] 28538 if sh.Op != OpAMD64SHLQconst { 28539 break 28540 } 28541 if sh.AuxInt != 8 { 28542 break 28543 } 28544 x0 := sh.Args[0] 28545 if x0.Op != OpAMD64MOVBloadidx1 { 28546 break 28547 } 28548 i0 := x0.AuxInt 28549 s := x0.Aux 28550 _ = x0.Args[2] 28551 p := x0.Args[0] 28552 idx := x0.Args[1] 28553 mem := x0.Args[2] 28554 x1 := v.Args[1] 28555 if x1.Op != OpAMD64MOVBloadidx1 { 28556 break 28557 } 28558 i1 := x1.AuxInt 28559 if x1.Aux != s { 28560 break 28561 } 28562 _ = x1.Args[2] 28563 if idx != x1.Args[0] { 28564 break 28565 } 28566 if p != x1.Args[1] { 28567 break 28568 } 28569 if mem != x1.Args[2] { 28570 break 28571 } 28572 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28573 break 28574 } 28575 b = mergePoint(b, x0, x1) 28576 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28577 v.reset(OpCopy) 28578 v.AddArg(v0) 28579 v0.AuxInt = 8 28580 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28581 v1.AuxInt = i0 28582 v1.Aux = s 28583 v1.AddArg(p) 28584 v1.AddArg(idx) 28585 v1.AddArg(mem) 28586 v0.AddArg(v1) 28587 return true 28588 } 28589 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 28590 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28591 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28592 for { 28593 _ = v.Args[1] 28594 sh := v.Args[0] 28595 if sh.Op != OpAMD64SHLQconst { 28596 break 28597 } 28598 if sh.AuxInt != 8 { 28599 break 28600 } 28601 x0 := sh.Args[0] 28602 if x0.Op != OpAMD64MOVBloadidx1 { 28603 break 28604 } 28605 i0 := x0.AuxInt 28606 s := x0.Aux 28607 _ = 
x0.Args[2] 28608 idx := x0.Args[0] 28609 p := x0.Args[1] 28610 mem := x0.Args[2] 28611 x1 := v.Args[1] 28612 if x1.Op != OpAMD64MOVBloadidx1 { 28613 break 28614 } 28615 i1 := x1.AuxInt 28616 if x1.Aux != s { 28617 break 28618 } 28619 _ = x1.Args[2] 28620 if idx != x1.Args[0] { 28621 break 28622 } 28623 if p != x1.Args[1] { 28624 break 28625 } 28626 if mem != x1.Args[2] { 28627 break 28628 } 28629 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28630 break 28631 } 28632 b = mergePoint(b, x0, x1) 28633 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28634 v.reset(OpCopy) 28635 v.AddArg(v0) 28636 v0.AuxInt = 8 28637 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28638 v1.AuxInt = i0 28639 v1.Aux = s 28640 v1.AddArg(p) 28641 v1.AddArg(idx) 28642 v1.AddArg(mem) 28643 v0.AddArg(v1) 28644 return true 28645 } 28646 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28647 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28648 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28649 for { 28650 _ = v.Args[1] 28651 r1 := v.Args[0] 28652 if r1.Op != OpAMD64ROLWconst { 28653 break 28654 } 28655 if r1.AuxInt != 8 { 28656 break 28657 } 28658 x1 := r1.Args[0] 28659 if x1.Op != OpAMD64MOVWloadidx1 { 28660 break 28661 } 28662 i1 := x1.AuxInt 28663 s := x1.Aux 28664 _ = x1.Args[2] 28665 p := x1.Args[0] 28666 idx := x1.Args[1] 28667 mem := x1.Args[2] 28668 sh := v.Args[1] 28669 if sh.Op != OpAMD64SHLQconst { 28670 break 28671 } 28672 if sh.AuxInt != 16 { 28673 break 28674 } 28675 r0 := sh.Args[0] 28676 if r0.Op != OpAMD64ROLWconst { 28677 break 28678 } 28679 if r0.AuxInt != 8 { 28680 break 28681 } 28682 x0 := r0.Args[0] 28683 if x0.Op != OpAMD64MOVWloadidx1 { 28684 break 28685 } 28686 i0 := x0.AuxInt 28687 if x0.Aux != s { 28688 break 28689 } 28690 _ = x0.Args[2] 28691 if p != x0.Args[0] { 28692 break 28693 } 28694 if idx != x0.Args[1] { 28695 break 28696 } 28697 if mem != x0.Args[2] { 28698 break 28699 } 28700 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28701 break 28702 } 28703 b = mergePoint(b, x0, x1) 28704 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28705 v.reset(OpCopy) 28706 v.AddArg(v0) 28707 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28708 v1.AuxInt = i0 28709 v1.Aux = s 28710 v1.AddArg(p) 28711 v1.AddArg(idx) 28712 v1.AddArg(mem) 28713 v0.AddArg(v1) 28714 return true 28715 } 28716 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28717 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28718 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28719 for { 28720 _ = v.Args[1] 28721 r1 := v.Args[0] 28722 if r1.Op != OpAMD64ROLWconst { 28723 break 28724 } 28725 if r1.AuxInt != 8 { 28726 break 28727 } 28728 x1 := r1.Args[0] 28729 if x1.Op != 
OpAMD64MOVWloadidx1 { 28730 break 28731 } 28732 i1 := x1.AuxInt 28733 s := x1.Aux 28734 _ = x1.Args[2] 28735 idx := x1.Args[0] 28736 p := x1.Args[1] 28737 mem := x1.Args[2] 28738 sh := v.Args[1] 28739 if sh.Op != OpAMD64SHLQconst { 28740 break 28741 } 28742 if sh.AuxInt != 16 { 28743 break 28744 } 28745 r0 := sh.Args[0] 28746 if r0.Op != OpAMD64ROLWconst { 28747 break 28748 } 28749 if r0.AuxInt != 8 { 28750 break 28751 } 28752 x0 := r0.Args[0] 28753 if x0.Op != OpAMD64MOVWloadidx1 { 28754 break 28755 } 28756 i0 := x0.AuxInt 28757 if x0.Aux != s { 28758 break 28759 } 28760 _ = x0.Args[2] 28761 if p != x0.Args[0] { 28762 break 28763 } 28764 if idx != x0.Args[1] { 28765 break 28766 } 28767 if mem != x0.Args[2] { 28768 break 28769 } 28770 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28771 break 28772 } 28773 b = mergePoint(b, x0, x1) 28774 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28775 v.reset(OpCopy) 28776 v.AddArg(v0) 28777 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28778 v1.AuxInt = i0 28779 v1.Aux = s 28780 v1.AddArg(p) 28781 v1.AddArg(idx) 28782 v1.AddArg(mem) 28783 v0.AddArg(v1) 28784 return true 28785 } 28786 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28787 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28788 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28789 for { 28790 _ = v.Args[1] 28791 r1 := v.Args[0] 28792 if r1.Op != OpAMD64ROLWconst { 28793 break 28794 } 28795 if r1.AuxInt != 8 { 28796 break 28797 } 28798 x1 := r1.Args[0] 28799 if x1.Op != OpAMD64MOVWloadidx1 { 28800 break 28801 } 28802 i1 := x1.AuxInt 28803 s := x1.Aux 28804 _ = x1.Args[2] 28805 p := x1.Args[0] 28806 idx := x1.Args[1] 28807 mem := x1.Args[2] 28808 sh := v.Args[1] 28809 if sh.Op != OpAMD64SHLQconst { 28810 break 28811 } 28812 if sh.AuxInt != 16 { 28813 break 28814 } 28815 r0 := sh.Args[0] 28816 if r0.Op != OpAMD64ROLWconst { 28817 break 28818 } 28819 if r0.AuxInt != 8 { 28820 break 28821 } 28822 x0 := r0.Args[0] 28823 if x0.Op != OpAMD64MOVWloadidx1 { 28824 break 28825 } 28826 i0 := x0.AuxInt 28827 if x0.Aux != s { 28828 break 28829 } 28830 _ = x0.Args[2] 28831 if idx != x0.Args[0] { 28832 break 28833 } 28834 if p != x0.Args[1] { 28835 break 28836 } 28837 if mem != x0.Args[2] { 28838 break 28839 } 28840 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28841 break 28842 } 28843 b = mergePoint(b, x0, x1) 28844 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28845 v.reset(OpCopy) 28846 v.AddArg(v0) 28847 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28848 v1.AuxInt = i0 28849 v1.Aux = s 28850 v1.AddArg(p) 28851 v1.AddArg(idx) 28852 v1.AddArg(mem) 28853 v0.AddArg(v1) 28854 return true 28855 } 28856 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28857 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != 
nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28858 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28859 for { 28860 _ = v.Args[1] 28861 r1 := v.Args[0] 28862 if r1.Op != OpAMD64ROLWconst { 28863 break 28864 } 28865 if r1.AuxInt != 8 { 28866 break 28867 } 28868 x1 := r1.Args[0] 28869 if x1.Op != OpAMD64MOVWloadidx1 { 28870 break 28871 } 28872 i1 := x1.AuxInt 28873 s := x1.Aux 28874 _ = x1.Args[2] 28875 idx := x1.Args[0] 28876 p := x1.Args[1] 28877 mem := x1.Args[2] 28878 sh := v.Args[1] 28879 if sh.Op != OpAMD64SHLQconst { 28880 break 28881 } 28882 if sh.AuxInt != 16 { 28883 break 28884 } 28885 r0 := sh.Args[0] 28886 if r0.Op != OpAMD64ROLWconst { 28887 break 28888 } 28889 if r0.AuxInt != 8 { 28890 break 28891 } 28892 x0 := r0.Args[0] 28893 if x0.Op != OpAMD64MOVWloadidx1 { 28894 break 28895 } 28896 i0 := x0.AuxInt 28897 if x0.Aux != s { 28898 break 28899 } 28900 _ = x0.Args[2] 28901 if idx != x0.Args[0] { 28902 break 28903 } 28904 if p != x0.Args[1] { 28905 break 28906 } 28907 if mem != x0.Args[2] { 28908 break 28909 } 28910 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28911 break 28912 } 28913 b = mergePoint(b, x0, x1) 28914 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28915 v.reset(OpCopy) 28916 v.AddArg(v0) 28917 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28918 v1.AuxInt = i0 28919 v1.Aux = s 28920 v1.AddArg(p) 28921 v1.AddArg(idx) 28922 v1.AddArg(mem) 28923 v0.AddArg(v1) 28924 return true 28925 } 28926 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28927 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28928 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28929 for { 28930 _ = v.Args[1] 28931 sh := v.Args[0] 28932 if sh.Op != OpAMD64SHLQconst { 28933 break 28934 } 28935 if sh.AuxInt != 16 { 28936 break 28937 } 28938 r0 := sh.Args[0] 28939 if r0.Op != OpAMD64ROLWconst { 28940 break 28941 } 28942 if r0.AuxInt != 8 { 28943 break 28944 } 28945 x0 := r0.Args[0] 28946 if x0.Op != OpAMD64MOVWloadidx1 { 28947 break 28948 } 28949 i0 := x0.AuxInt 28950 s := x0.Aux 28951 _ = x0.Args[2] 28952 p := x0.Args[0] 28953 idx := x0.Args[1] 28954 mem := x0.Args[2] 28955 r1 := v.Args[1] 28956 if r1.Op != OpAMD64ROLWconst { 28957 break 28958 } 28959 if r1.AuxInt != 8 { 28960 break 28961 } 28962 x1 := r1.Args[0] 28963 if x1.Op != OpAMD64MOVWloadidx1 { 28964 break 28965 } 28966 i1 := x1.AuxInt 28967 if x1.Aux != s { 28968 break 28969 } 28970 _ = x1.Args[2] 28971 if p != x1.Args[0] { 28972 break 28973 } 28974 if idx != x1.Args[1] { 28975 break 28976 } 28977 if mem != x1.Args[2] { 28978 break 28979 } 28980 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28981 break 28982 } 28983 b = mergePoint(b, x0, x1) 28984 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28985 v.reset(OpCopy) 28986 v.AddArg(v0) 28987 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28988 v1.AuxInt = i0 28989 v1.Aux = s 28990 v1.AddArg(p) 28991 
v1.AddArg(idx) 28992 v1.AddArg(mem) 28993 v0.AddArg(v1) 28994 return true 28995 } 28996 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28997 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28998 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28999 for { 29000 _ = v.Args[1] 29001 sh := v.Args[0] 29002 if sh.Op != OpAMD64SHLQconst { 29003 break 29004 } 29005 if sh.AuxInt != 16 { 29006 break 29007 } 29008 r0 := sh.Args[0] 29009 if r0.Op != OpAMD64ROLWconst { 29010 break 29011 } 29012 if r0.AuxInt != 8 { 29013 break 29014 } 29015 x0 := r0.Args[0] 29016 if x0.Op != OpAMD64MOVWloadidx1 { 29017 break 29018 } 29019 i0 := x0.AuxInt 29020 s := x0.Aux 29021 _ = x0.Args[2] 29022 idx := x0.Args[0] 29023 p := x0.Args[1] 29024 mem := x0.Args[2] 29025 r1 := v.Args[1] 29026 if r1.Op != OpAMD64ROLWconst { 29027 break 29028 } 29029 if r1.AuxInt != 8 { 29030 break 29031 } 29032 x1 := r1.Args[0] 29033 if x1.Op != OpAMD64MOVWloadidx1 { 29034 break 29035 } 29036 i1 := x1.AuxInt 29037 if x1.Aux != s { 29038 break 29039 } 29040 _ = x1.Args[2] 29041 if p != x1.Args[0] { 29042 break 29043 } 29044 if idx != x1.Args[1] { 29045 break 29046 } 29047 if mem != x1.Args[2] { 29048 break 29049 } 29050 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29051 break 29052 } 29053 b = mergePoint(b, x0, x1) 29054 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29055 v.reset(OpCopy) 29056 v.AddArg(v0) 29057 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29058 v1.AuxInt = i0 29059 v1.Aux = s 29060 v1.AddArg(p) 29061 v1.AddArg(idx) 29062 v1.AddArg(mem) 29063 v0.AddArg(v1) 29064 return true 29065 } 29066 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 29067 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29068 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29069 for { 29070 _ = v.Args[1] 29071 sh := v.Args[0] 29072 if sh.Op != OpAMD64SHLQconst { 29073 break 29074 } 29075 if sh.AuxInt != 16 { 29076 break 29077 } 29078 r0 := sh.Args[0] 29079 if r0.Op != OpAMD64ROLWconst { 29080 break 29081 } 29082 if r0.AuxInt != 8 { 29083 break 29084 } 29085 x0 := r0.Args[0] 29086 if x0.Op != OpAMD64MOVWloadidx1 { 29087 break 29088 } 29089 i0 := x0.AuxInt 29090 s := x0.Aux 29091 _ = x0.Args[2] 29092 p := x0.Args[0] 29093 idx := x0.Args[1] 29094 mem := x0.Args[2] 29095 r1 := v.Args[1] 29096 if r1.Op != OpAMD64ROLWconst { 29097 break 29098 } 29099 if r1.AuxInt != 8 { 29100 break 29101 } 29102 x1 := r1.Args[0] 29103 if x1.Op != OpAMD64MOVWloadidx1 { 29104 break 29105 } 29106 i1 := x1.AuxInt 29107 if x1.Aux != s { 29108 break 29109 } 29110 _ = x1.Args[2] 29111 if idx != x1.Args[0] { 29112 break 29113 } 29114 if p != x1.Args[1] { 29115 break 29116 } 29117 if mem != x1.Args[2] { 29118 break 29119 } 29120 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, 
x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29121 break 29122 } 29123 b = mergePoint(b, x0, x1) 29124 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29125 v.reset(OpCopy) 29126 v.AddArg(v0) 29127 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29128 v1.AuxInt = i0 29129 v1.Aux = s 29130 v1.AddArg(p) 29131 v1.AddArg(idx) 29132 v1.AddArg(mem) 29133 v0.AddArg(v1) 29134 return true 29135 } 29136 return false 29137 } 29138 func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool { 29139 b := v.Block 29140 _ = b 29141 typ := &b.Func.Config.Types 29142 _ = typ 29143 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 29144 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29145 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29146 for { 29147 _ = v.Args[1] 29148 sh := v.Args[0] 29149 if sh.Op != OpAMD64SHLQconst { 29150 break 29151 } 29152 if sh.AuxInt != 16 { 29153 break 29154 } 29155 r0 := sh.Args[0] 29156 if r0.Op != OpAMD64ROLWconst { 29157 break 29158 } 29159 if r0.AuxInt != 8 { 29160 break 29161 } 29162 x0 := r0.Args[0] 29163 if x0.Op != OpAMD64MOVWloadidx1 { 29164 break 29165 } 29166 i0 := x0.AuxInt 29167 s := x0.Aux 29168 _ = x0.Args[2] 29169 idx := x0.Args[0] 29170 p := x0.Args[1] 29171 mem := x0.Args[2] 29172 r1 := v.Args[1] 29173 if r1.Op != OpAMD64ROLWconst { 29174 break 29175 } 29176 if r1.AuxInt != 8 { 29177 break 29178 } 29179 x1 := r1.Args[0] 29180 if x1.Op != OpAMD64MOVWloadidx1 { 29181 break 29182 } 29183 i1 := x1.AuxInt 29184 if x1.Aux != s { 29185 break 29186 } 29187 _ = x1.Args[2] 29188 if idx != x1.Args[0] { 29189 break 29190 } 29191 if p != x1.Args[1] { 29192 break 29193 } 29194 if mem != x1.Args[2] { 29195 break 29196 } 29197 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29198 break 29199 } 29200 b = mergePoint(b, x0, x1) 29201 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29202 v.reset(OpCopy) 29203 v.AddArg(v0) 29204 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29205 v1.AuxInt = i0 29206 v1.Aux = s 29207 v1.AddArg(p) 29208 v1.AddArg(idx) 29209 v1.AddArg(mem) 29210 v0.AddArg(v1) 29211 return true 29212 } 29213 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 29214 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29215 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29216 for { 29217 _ = v.Args[1] 29218 r1 := v.Args[0] 29219 if r1.Op != OpAMD64BSWAPL { 29220 break 29221 } 29222 x1 := r1.Args[0] 29223 if x1.Op != OpAMD64MOVLloadidx1 { 29224 break 29225 } 29226 i1 := x1.AuxInt 29227 s := x1.Aux 29228 _ = x1.Args[2] 29229 p := x1.Args[0] 29230 idx := x1.Args[1] 29231 mem := x1.Args[2] 29232 sh := v.Args[1] 29233 if sh.Op != OpAMD64SHLQconst { 29234 break 29235 } 29236 if sh.AuxInt != 32 { 29237 break 29238 } 29239 r0 := sh.Args[0] 29240 if r0.Op != OpAMD64BSWAPL { 29241 break 29242 } 29243 x0 := 
r0.Args[0] 29244 if x0.Op != OpAMD64MOVLloadidx1 { 29245 break 29246 } 29247 i0 := x0.AuxInt 29248 if x0.Aux != s { 29249 break 29250 } 29251 _ = x0.Args[2] 29252 if p != x0.Args[0] { 29253 break 29254 } 29255 if idx != x0.Args[1] { 29256 break 29257 } 29258 if mem != x0.Args[2] { 29259 break 29260 } 29261 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29262 break 29263 } 29264 b = mergePoint(b, x0, x1) 29265 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29266 v.reset(OpCopy) 29267 v.AddArg(v0) 29268 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29269 v1.AuxInt = i0 29270 v1.Aux = s 29271 v1.AddArg(p) 29272 v1.AddArg(idx) 29273 v1.AddArg(mem) 29274 v0.AddArg(v1) 29275 return true 29276 } 29277 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 29278 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29279 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29280 for { 29281 _ = v.Args[1] 29282 r1 := v.Args[0] 29283 if r1.Op != OpAMD64BSWAPL { 29284 break 29285 } 29286 x1 := r1.Args[0] 29287 if x1.Op != OpAMD64MOVLloadidx1 { 29288 break 29289 } 29290 i1 := x1.AuxInt 29291 s := x1.Aux 29292 _ = x1.Args[2] 29293 idx := x1.Args[0] 29294 p := x1.Args[1] 29295 mem := x1.Args[2] 29296 sh := v.Args[1] 29297 if sh.Op != OpAMD64SHLQconst { 29298 break 29299 } 29300 if sh.AuxInt != 32 { 29301 break 29302 } 29303 r0 := sh.Args[0] 29304 if r0.Op != OpAMD64BSWAPL { 29305 break 29306 } 29307 x0 := r0.Args[0] 29308 if x0.Op != OpAMD64MOVLloadidx1 { 29309 break 29310 } 29311 i0 := x0.AuxInt 29312 if x0.Aux != s { 29313 break 29314 } 29315 _ = x0.Args[2] 29316 if p != x0.Args[0] { 29317 break 29318 } 29319 if idx != x0.Args[1] { 29320 break 29321 } 29322 if mem != x0.Args[2] { 29323 break 29324 } 29325 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29326 break 29327 } 29328 b = mergePoint(b, x0, x1) 29329 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29330 v.reset(OpCopy) 29331 v.AddArg(v0) 29332 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29333 v1.AuxInt = i0 29334 v1.Aux = s 29335 v1.AddArg(p) 29336 v1.AddArg(idx) 29337 v1.AddArg(mem) 29338 v0.AddArg(v1) 29339 return true 29340 } 29341 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 29342 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29343 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29344 for { 29345 _ = v.Args[1] 29346 r1 := v.Args[0] 29347 if r1.Op != OpAMD64BSWAPL { 29348 break 29349 } 29350 x1 := r1.Args[0] 29351 if x1.Op != OpAMD64MOVLloadidx1 { 29352 break 29353 } 29354 i1 := x1.AuxInt 29355 s := x1.Aux 29356 _ = x1.Args[2] 29357 p := x1.Args[0] 29358 idx := x1.Args[1] 29359 mem := x1.Args[2] 29360 sh := v.Args[1] 29361 if sh.Op != OpAMD64SHLQconst { 29362 break 
29363 } 29364 if sh.AuxInt != 32 { 29365 break 29366 } 29367 r0 := sh.Args[0] 29368 if r0.Op != OpAMD64BSWAPL { 29369 break 29370 } 29371 x0 := r0.Args[0] 29372 if x0.Op != OpAMD64MOVLloadidx1 { 29373 break 29374 } 29375 i0 := x0.AuxInt 29376 if x0.Aux != s { 29377 break 29378 } 29379 _ = x0.Args[2] 29380 if idx != x0.Args[0] { 29381 break 29382 } 29383 if p != x0.Args[1] { 29384 break 29385 } 29386 if mem != x0.Args[2] { 29387 break 29388 } 29389 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29390 break 29391 } 29392 b = mergePoint(b, x0, x1) 29393 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29394 v.reset(OpCopy) 29395 v.AddArg(v0) 29396 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29397 v1.AuxInt = i0 29398 v1.Aux = s 29399 v1.AddArg(p) 29400 v1.AddArg(idx) 29401 v1.AddArg(mem) 29402 v0.AddArg(v1) 29403 return true 29404 } 29405 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 29406 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29407 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29408 for { 29409 _ = v.Args[1] 29410 r1 := v.Args[0] 29411 if r1.Op != OpAMD64BSWAPL { 29412 break 29413 } 29414 x1 := r1.Args[0] 29415 if x1.Op != OpAMD64MOVLloadidx1 { 29416 break 29417 } 29418 i1 := x1.AuxInt 29419 s := x1.Aux 29420 _ = x1.Args[2] 29421 idx := x1.Args[0] 29422 p := x1.Args[1] 29423 mem := x1.Args[2] 29424 sh := v.Args[1] 29425 if sh.Op != OpAMD64SHLQconst { 29426 break 29427 } 29428 if sh.AuxInt != 32 { 29429 break 29430 } 29431 r0 := sh.Args[0] 29432 if r0.Op != OpAMD64BSWAPL { 29433 break 29434 } 29435 x0 := r0.Args[0] 29436 if x0.Op != OpAMD64MOVLloadidx1 { 29437 break 29438 } 29439 i0 := x0.AuxInt 29440 if x0.Aux != s { 29441 break 29442 } 29443 _ = x0.Args[2] 29444 if idx != x0.Args[0] { 29445 break 29446 } 29447 if p != x0.Args[1] { 29448 break 29449 } 29450 if mem != x0.Args[2] { 29451 break 29452 } 29453 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29454 break 29455 } 29456 b = mergePoint(b, x0, x1) 29457 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29458 v.reset(OpCopy) 29459 v.AddArg(v0) 29460 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29461 v1.AuxInt = i0 29462 v1.Aux = s 29463 v1.AddArg(p) 29464 v1.AddArg(idx) 29465 v1.AddArg(mem) 29466 v0.AddArg(v1) 29467 return true 29468 } 29469 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 29470 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29471 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29472 for { 29473 _ = v.Args[1] 29474 sh := v.Args[0] 29475 if sh.Op != OpAMD64SHLQconst { 29476 break 29477 } 29478 if sh.AuxInt != 32 { 29479 break 29480 } 29481 r0 := sh.Args[0] 29482 if r0.Op != OpAMD64BSWAPL { 29483 break 29484 } 29485 x0 := 
r0.Args[0] 29486 if x0.Op != OpAMD64MOVLloadidx1 { 29487 break 29488 } 29489 i0 := x0.AuxInt 29490 s := x0.Aux 29491 _ = x0.Args[2] 29492 p := x0.Args[0] 29493 idx := x0.Args[1] 29494 mem := x0.Args[2] 29495 r1 := v.Args[1] 29496 if r1.Op != OpAMD64BSWAPL { 29497 break 29498 } 29499 x1 := r1.Args[0] 29500 if x1.Op != OpAMD64MOVLloadidx1 { 29501 break 29502 } 29503 i1 := x1.AuxInt 29504 if x1.Aux != s { 29505 break 29506 } 29507 _ = x1.Args[2] 29508 if p != x1.Args[0] { 29509 break 29510 } 29511 if idx != x1.Args[1] { 29512 break 29513 } 29514 if mem != x1.Args[2] { 29515 break 29516 } 29517 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29518 break 29519 } 29520 b = mergePoint(b, x0, x1) 29521 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29522 v.reset(OpCopy) 29523 v.AddArg(v0) 29524 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29525 v1.AuxInt = i0 29526 v1.Aux = s 29527 v1.AddArg(p) 29528 v1.AddArg(idx) 29529 v1.AddArg(mem) 29530 v0.AddArg(v1) 29531 return true 29532 } 29533 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 29534 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29535 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29536 for { 29537 _ = v.Args[1] 29538 sh := v.Args[0] 29539 if sh.Op != OpAMD64SHLQconst { 29540 break 29541 } 29542 if sh.AuxInt != 32 { 29543 break 29544 } 29545 r0 := sh.Args[0] 29546 if r0.Op != OpAMD64BSWAPL { 29547 break 29548 } 29549 x0 := r0.Args[0] 29550 if x0.Op != OpAMD64MOVLloadidx1 { 29551 break 29552 } 29553 i0 := x0.AuxInt 29554 s := x0.Aux 29555 _ = x0.Args[2] 29556 idx := x0.Args[0] 29557 p := x0.Args[1] 29558 mem := x0.Args[2] 29559 r1 := v.Args[1] 29560 if r1.Op != OpAMD64BSWAPL { 29561 break 29562 } 29563 x1 := r1.Args[0] 29564 if x1.Op != OpAMD64MOVLloadidx1 { 29565 break 29566 } 29567 i1 := x1.AuxInt 29568 if x1.Aux != s { 29569 break 29570 } 29571 _ = x1.Args[2] 29572 if p != x1.Args[0] { 29573 break 29574 } 29575 if idx != x1.Args[1] { 29576 break 29577 } 29578 if mem != x1.Args[2] { 29579 break 29580 } 29581 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29582 break 29583 } 29584 b = mergePoint(b, x0, x1) 29585 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29586 v.reset(OpCopy) 29587 v.AddArg(v0) 29588 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29589 v1.AuxInt = i0 29590 v1.Aux = s 29591 v1.AddArg(p) 29592 v1.AddArg(idx) 29593 v1.AddArg(mem) 29594 v0.AddArg(v1) 29595 return true 29596 } 29597 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) 29598 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29599 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29600 for { 29601 _ = v.Args[1] 29602 sh := v.Args[0] 29603 if sh.Op != OpAMD64SHLQconst { 29604 break 
29605 } 29606 if sh.AuxInt != 32 { 29607 break 29608 } 29609 r0 := sh.Args[0] 29610 if r0.Op != OpAMD64BSWAPL { 29611 break 29612 } 29613 x0 := r0.Args[0] 29614 if x0.Op != OpAMD64MOVLloadidx1 { 29615 break 29616 } 29617 i0 := x0.AuxInt 29618 s := x0.Aux 29619 _ = x0.Args[2] 29620 p := x0.Args[0] 29621 idx := x0.Args[1] 29622 mem := x0.Args[2] 29623 r1 := v.Args[1] 29624 if r1.Op != OpAMD64BSWAPL { 29625 break 29626 } 29627 x1 := r1.Args[0] 29628 if x1.Op != OpAMD64MOVLloadidx1 { 29629 break 29630 } 29631 i1 := x1.AuxInt 29632 if x1.Aux != s { 29633 break 29634 } 29635 _ = x1.Args[2] 29636 if idx != x1.Args[0] { 29637 break 29638 } 29639 if p != x1.Args[1] { 29640 break 29641 } 29642 if mem != x1.Args[2] { 29643 break 29644 } 29645 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29646 break 29647 } 29648 b = mergePoint(b, x0, x1) 29649 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29650 v.reset(OpCopy) 29651 v.AddArg(v0) 29652 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29653 v1.AuxInt = i0 29654 v1.Aux = s 29655 v1.AddArg(p) 29656 v1.AddArg(idx) 29657 v1.AddArg(mem) 29658 v0.AddArg(v1) 29659 return true 29660 } 29661 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) 29662 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29663 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29664 for { 29665 _ = v.Args[1] 29666 sh := v.Args[0] 29667 if sh.Op != OpAMD64SHLQconst { 29668 break 29669 } 29670 if sh.AuxInt != 32 { 29671 break 29672 } 29673 r0 := sh.Args[0] 29674 if r0.Op != OpAMD64BSWAPL { 29675 break 29676 } 29677 x0 := r0.Args[0] 29678 if x0.Op != OpAMD64MOVLloadidx1 { 29679 break 29680 } 29681 i0 := x0.AuxInt 29682 s := x0.Aux 29683 _ = x0.Args[2] 29684 idx := x0.Args[0] 29685 p := x0.Args[1] 29686 mem := x0.Args[2] 29687 r1 := v.Args[1] 29688 if r1.Op != OpAMD64BSWAPL { 29689 break 29690 } 29691 x1 := r1.Args[0] 29692 if x1.Op != OpAMD64MOVLloadidx1 { 29693 break 29694 } 29695 i1 := x1.AuxInt 29696 if x1.Aux != s { 29697 break 29698 } 29699 _ = x1.Args[2] 29700 if idx != x1.Args[0] { 29701 break 29702 } 29703 if p != x1.Args[1] { 29704 break 29705 } 29706 if mem != x1.Args[2] { 29707 break 29708 } 29709 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29710 break 29711 } 29712 b = mergePoint(b, x0, x1) 29713 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29714 v.reset(OpCopy) 29715 v.AddArg(v0) 29716 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29717 v1.AuxInt = i0 29718 v1.Aux = s 29719 v1.AddArg(p) 29720 v1.AddArg(idx) 29721 v1.AddArg(mem) 29722 v0.AddArg(v1) 29723 return true 29724 } 29725 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 29726 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29727 // 
result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29728 for { 29729 _ = v.Args[1] 29730 s0 := v.Args[0] 29731 if s0.Op != OpAMD64SHLQconst { 29732 break 29733 } 29734 j0 := s0.AuxInt 29735 x0 := s0.Args[0] 29736 if x0.Op != OpAMD64MOVBloadidx1 { 29737 break 29738 } 29739 i0 := x0.AuxInt 29740 s := x0.Aux 29741 _ = x0.Args[2] 29742 p := x0.Args[0] 29743 idx := x0.Args[1] 29744 mem := x0.Args[2] 29745 or := v.Args[1] 29746 if or.Op != OpAMD64ORQ { 29747 break 29748 } 29749 _ = or.Args[1] 29750 s1 := or.Args[0] 29751 if s1.Op != OpAMD64SHLQconst { 29752 break 29753 } 29754 j1 := s1.AuxInt 29755 x1 := s1.Args[0] 29756 if x1.Op != OpAMD64MOVBloadidx1 { 29757 break 29758 } 29759 i1 := x1.AuxInt 29760 if x1.Aux != s { 29761 break 29762 } 29763 _ = x1.Args[2] 29764 if p != x1.Args[0] { 29765 break 29766 } 29767 if idx != x1.Args[1] { 29768 break 29769 } 29770 if mem != x1.Args[2] { 29771 break 29772 } 29773 y := or.Args[1] 29774 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29775 break 29776 } 29777 b = mergePoint(b, x0, x1) 29778 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29779 v.reset(OpCopy) 29780 v.AddArg(v0) 29781 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29782 v1.AuxInt = j1 29783 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29784 v2.AuxInt = 8 29785 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29786 v3.AuxInt = i0 29787 v3.Aux = s 29788 v3.AddArg(p) 29789 v3.AddArg(idx) 29790 v3.AddArg(mem) 29791 v2.AddArg(v3) 29792 v1.AddArg(v2) 29793 v0.AddArg(v1) 29794 v0.AddArg(y) 29795 return true 29796 } 29797 return false 29798 } 29799 func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool { 29800 b := v.Block 29801 _ = b 29802 typ := &b.Func.Config.Types 29803 _ = typ 29804 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 29805 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29806 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29807 for { 29808 _ = v.Args[1] 29809 s0 := v.Args[0] 29810 if s0.Op != OpAMD64SHLQconst { 29811 break 29812 } 29813 j0 := s0.AuxInt 29814 x0 := s0.Args[0] 29815 if x0.Op != OpAMD64MOVBloadidx1 { 29816 break 29817 } 29818 i0 := x0.AuxInt 29819 s := x0.Aux 29820 _ = x0.Args[2] 29821 idx := x0.Args[0] 29822 p := x0.Args[1] 29823 mem := x0.Args[2] 29824 or := v.Args[1] 29825 if or.Op != OpAMD64ORQ { 29826 break 29827 } 29828 _ = or.Args[1] 29829 s1 := or.Args[0] 29830 if s1.Op != OpAMD64SHLQconst { 29831 break 29832 } 29833 j1 := s1.AuxInt 29834 x1 := s1.Args[0] 29835 if x1.Op != OpAMD64MOVBloadidx1 { 29836 break 29837 } 29838 i1 := x1.AuxInt 29839 if x1.Aux != s { 29840 break 29841 } 29842 _ = x1.Args[2] 29843 if p != x1.Args[0] { 29844 break 29845 } 29846 if idx != x1.Args[1] { 29847 break 29848 } 29849 if mem != x1.Args[2] { 29850 break 29851 } 29852 y := or.Args[1] 29853 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, 
x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29854 break 29855 } 29856 b = mergePoint(b, x0, x1) 29857 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29858 v.reset(OpCopy) 29859 v.AddArg(v0) 29860 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29861 v1.AuxInt = j1 29862 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29863 v2.AuxInt = 8 29864 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29865 v3.AuxInt = i0 29866 v3.Aux = s 29867 v3.AddArg(p) 29868 v3.AddArg(idx) 29869 v3.AddArg(mem) 29870 v2.AddArg(v3) 29871 v1.AddArg(v2) 29872 v0.AddArg(v1) 29873 v0.AddArg(y) 29874 return true 29875 } 29876 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 29877 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29878 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29879 for { 29880 _ = v.Args[1] 29881 s0 := v.Args[0] 29882 if s0.Op != OpAMD64SHLQconst { 29883 break 29884 } 29885 j0 := s0.AuxInt 29886 x0 := s0.Args[0] 29887 if x0.Op != OpAMD64MOVBloadidx1 { 29888 break 29889 } 29890 i0 := x0.AuxInt 29891 s := x0.Aux 29892 _ = x0.Args[2] 29893 p := x0.Args[0] 29894 idx := x0.Args[1] 29895 mem := x0.Args[2] 29896 or := v.Args[1] 29897 if or.Op != OpAMD64ORQ { 29898 break 29899 } 29900 _ = or.Args[1] 29901 s1 := or.Args[0] 29902 if s1.Op != OpAMD64SHLQconst { 29903 break 29904 } 29905 j1 := s1.AuxInt 29906 x1 := s1.Args[0] 29907 if x1.Op != OpAMD64MOVBloadidx1 { 29908 break 29909 } 29910 i1 := x1.AuxInt 29911 if x1.Aux != s { 29912 break 29913 } 29914 _ = x1.Args[2] 29915 if idx != x1.Args[0] { 29916 break 29917 } 29918 if p != x1.Args[1] { 29919 break 29920 } 29921 if mem != x1.Args[2] { 29922 break 29923 } 29924 y := or.Args[1] 29925 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29926 break 29927 } 29928 b = mergePoint(b, x0, x1) 29929 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29930 v.reset(OpCopy) 29931 v.AddArg(v0) 29932 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29933 v1.AuxInt = j1 29934 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29935 v2.AuxInt = 8 29936 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29937 v3.AuxInt = i0 29938 v3.Aux = s 29939 v3.AddArg(p) 29940 v3.AddArg(idx) 29941 v3.AddArg(mem) 29942 v2.AddArg(v3) 29943 v1.AddArg(v2) 29944 v0.AddArg(v1) 29945 v0.AddArg(y) 29946 return true 29947 } 29948 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 29949 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29950 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29951 for { 29952 _ = v.Args[1] 29953 s0 := v.Args[0] 29954 if s0.Op != OpAMD64SHLQconst { 29955 break 29956 } 29957 j0 := s0.AuxInt 29958 
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
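	// The surrounding cases are argument-order permutations of a single
	// rewrite: two one-byte loads from adjacent offsets i0 and i0+1, each
	// shifted into position and ORed together, become one 16-bit load plus a
	// byte swap (ROLWconst [8]), i.e. a big-endian uint16 load. Illustrative
	// Go source that can lower to this shape (an assumption for exposition,
	// not part of the generated rules):
	//
	//	x := y | uint64(b[i])<<(k+8) | uint64(b[i+1])<<k // k = j1, k%16 == 0
	//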
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
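// rewriteValueAMD64_OpAMD64ORQ_140 continues the byte-load-combining rules
// and then begins the 16-bit variants. ORQ is commutative, and so are the two
// index arguments of the indexed loads, so the rule generator emits one case
// per argument ordering; every case checks the same cond (adjacent offsets,
// matching shift counts, single-use values) and rewrites at
// mergePoint(b, x0, x1), the block where both loads are available.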
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
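	// The cases below merge two byte-swapped 16-bit loads from adjacent
	// offsets i0 and i0+2 into a single 32-bit load followed by BSWAPL,
	// i.e. a big-endian uint32 load. Illustrative Go source that can lower
	// to this shape (an assumption for exposition, not part of the rules):
	//
	//	x := y | uint64(binary.BigEndian.Uint32(b[i:]))<<k // k = j1, k%32 == 0
	//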
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
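	// In each of these word-merge cases the cond terms j1 == j0-16 and
	// j1%32 == 0 require the two 16-bit halves to land in one aligned 32-bit
	// lane of the 64-bit result, with x1's half 16 bits below x0's, so the
	// pair can be replaced by one 32-bit load shifted left by j1.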
x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31233 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31234 for { 31235 _ = v.Args[1] 31236 s0 := v.Args[0] 31237 if s0.Op != OpAMD64SHLQconst { 31238 break 31239 } 31240 j0 := s0.AuxInt 31241 r0 := s0.Args[0] 31242 if r0.Op != OpAMD64ROLWconst { 31243 break 31244 } 31245 if r0.AuxInt != 8 { 31246 break 31247 } 31248 x0 := r0.Args[0] 31249 if x0.Op != OpAMD64MOVWloadidx1 { 31250 break 31251 } 31252 i0 := x0.AuxInt 31253 s := x0.Aux 31254 _ = x0.Args[2] 31255 p := x0.Args[0] 31256 idx := x0.Args[1] 31257 mem := x0.Args[2] 31258 or := v.Args[1] 31259 if or.Op != OpAMD64ORQ { 31260 break 31261 } 31262 _ = or.Args[1] 31263 y := or.Args[0] 31264 s1 := or.Args[1] 31265 if s1.Op != OpAMD64SHLQconst { 31266 break 31267 } 31268 j1 := s1.AuxInt 31269 r1 := s1.Args[0] 31270 if r1.Op != OpAMD64ROLWconst { 31271 break 31272 } 31273 if r1.AuxInt != 8 { 31274 break 31275 } 31276 x1 := r1.Args[0] 31277 if x1.Op != OpAMD64MOVWloadidx1 { 31278 break 31279 } 31280 i1 := x1.AuxInt 31281 if x1.Aux != s { 31282 break 31283 } 31284 _ = x1.Args[2] 31285 if p != x1.Args[0] { 31286 break 31287 } 31288 if idx != x1.Args[1] { 31289 break 31290 } 31291 if mem != x1.Args[2] { 31292 break 31293 } 31294 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31295 break 31296 } 31297 b = mergePoint(b, x0, x1) 31298 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31299 v.reset(OpCopy) 31300 v.AddArg(v0) 31301 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31302 v1.AuxInt = j1 31303 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31304 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31305 v3.AuxInt = i0 31306 v3.Aux = s 31307 v3.AddArg(p) 31308 v3.AddArg(idx) 31309 v3.AddArg(mem) 31310 v2.AddArg(v3) 31311 v1.AddArg(v2) 31312 v0.AddArg(v1) 31313 v0.AddArg(y) 31314 return true 31315 } 31316 return false 31317 } 31318 func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 31319 b := v.Block 31320 _ = b 31321 typ := &b.Func.Config.Types 31322 _ = typ 31323 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 31324 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31325 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31326 for { 31327 _ = v.Args[1] 31328 s0 := v.Args[0] 31329 if s0.Op != OpAMD64SHLQconst { 31330 break 31331 } 31332 j0 := s0.AuxInt 31333 r0 := s0.Args[0] 31334 if r0.Op != OpAMD64ROLWconst { 31335 break 31336 } 31337 if r0.AuxInt != 8 { 31338 break 31339 } 31340 x0 := r0.Args[0] 31341 if x0.Op != OpAMD64MOVWloadidx1 { 31342 break 31343 } 31344 i0 := x0.AuxInt 31345 s := x0.Aux 31346 _ = 
x0.Args[2] 31347 idx := x0.Args[0] 31348 p := x0.Args[1] 31349 mem := x0.Args[2] 31350 or := v.Args[1] 31351 if or.Op != OpAMD64ORQ { 31352 break 31353 } 31354 _ = or.Args[1] 31355 y := or.Args[0] 31356 s1 := or.Args[1] 31357 if s1.Op != OpAMD64SHLQconst { 31358 break 31359 } 31360 j1 := s1.AuxInt 31361 r1 := s1.Args[0] 31362 if r1.Op != OpAMD64ROLWconst { 31363 break 31364 } 31365 if r1.AuxInt != 8 { 31366 break 31367 } 31368 x1 := r1.Args[0] 31369 if x1.Op != OpAMD64MOVWloadidx1 { 31370 break 31371 } 31372 i1 := x1.AuxInt 31373 if x1.Aux != s { 31374 break 31375 } 31376 _ = x1.Args[2] 31377 if p != x1.Args[0] { 31378 break 31379 } 31380 if idx != x1.Args[1] { 31381 break 31382 } 31383 if mem != x1.Args[2] { 31384 break 31385 } 31386 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31387 break 31388 } 31389 b = mergePoint(b, x0, x1) 31390 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31391 v.reset(OpCopy) 31392 v.AddArg(v0) 31393 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31394 v1.AuxInt = j1 31395 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31396 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31397 v3.AuxInt = i0 31398 v3.Aux = s 31399 v3.AddArg(p) 31400 v3.AddArg(idx) 31401 v3.AddArg(mem) 31402 v2.AddArg(v3) 31403 v1.AddArg(v2) 31404 v0.AddArg(v1) 31405 v0.AddArg(y) 31406 return true 31407 } 31408 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 31409 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31410 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31411 for { 31412 _ = v.Args[1] 31413 s0 := v.Args[0] 31414 if s0.Op != OpAMD64SHLQconst { 31415 break 31416 } 31417 j0 := s0.AuxInt 31418 r0 := s0.Args[0] 31419 if r0.Op != OpAMD64ROLWconst { 31420 break 31421 } 31422 if r0.AuxInt != 8 { 31423 break 31424 } 31425 x0 := r0.Args[0] 31426 if x0.Op != OpAMD64MOVWloadidx1 { 31427 break 31428 } 31429 i0 := x0.AuxInt 31430 s := x0.Aux 31431 _ = x0.Args[2] 31432 p := x0.Args[0] 31433 idx := x0.Args[1] 31434 mem := x0.Args[2] 31435 or := v.Args[1] 31436 if or.Op != OpAMD64ORQ { 31437 break 31438 } 31439 _ = or.Args[1] 31440 y := or.Args[0] 31441 s1 := or.Args[1] 31442 if s1.Op != OpAMD64SHLQconst { 31443 break 31444 } 31445 j1 := s1.AuxInt 31446 r1 := s1.Args[0] 31447 if r1.Op != OpAMD64ROLWconst { 31448 break 31449 } 31450 if r1.AuxInt != 8 { 31451 break 31452 } 31453 x1 := r1.Args[0] 31454 if x1.Op != OpAMD64MOVWloadidx1 { 31455 break 31456 } 31457 i1 := x1.AuxInt 31458 if x1.Aux != s { 31459 break 31460 } 31461 _ = x1.Args[2] 31462 if idx != x1.Args[0] { 31463 break 31464 } 31465 if p != x1.Args[1] { 31466 break 31467 } 31468 if mem != x1.Args[2] { 31469 break 31470 } 31471 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31472 break 31473 } 31474 b = mergePoint(b, x0, x1) 31475 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31476 v.reset(OpCopy) 31477 v.AddArg(v0) 31478 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31479 v1.AuxInt = j1 31480 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31481 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31482 v3.AuxInt = i0 31483 v3.Aux = s 31484 v3.AddArg(p) 31485 v3.AddArg(idx) 31486 v3.AddArg(mem) 31487 v2.AddArg(v3) 31488 v1.AddArg(v2) 31489 v0.AddArg(v1) 31490 v0.AddArg(y) 31491 return true 31492 } 31493 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 31494 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31495 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31496 for { 31497 _ = v.Args[1] 31498 s0 := v.Args[0] 31499 if s0.Op != OpAMD64SHLQconst { 31500 break 31501 } 31502 j0 := s0.AuxInt 31503 r0 := s0.Args[0] 31504 if r0.Op != OpAMD64ROLWconst { 31505 break 31506 } 31507 if r0.AuxInt != 8 { 31508 break 31509 } 31510 x0 := r0.Args[0] 31511 if x0.Op != OpAMD64MOVWloadidx1 { 31512 break 31513 } 31514 i0 := x0.AuxInt 31515 s := x0.Aux 31516 _ = x0.Args[2] 31517 idx := x0.Args[0] 31518 p := x0.Args[1] 31519 mem := x0.Args[2] 31520 or := v.Args[1] 31521 if or.Op != OpAMD64ORQ { 31522 break 31523 } 31524 _ = or.Args[1] 31525 y := or.Args[0] 31526 s1 := or.Args[1] 31527 if s1.Op != OpAMD64SHLQconst { 31528 break 31529 } 31530 j1 := s1.AuxInt 31531 r1 := s1.Args[0] 31532 if r1.Op != OpAMD64ROLWconst { 31533 break 31534 } 31535 if r1.AuxInt != 8 { 31536 break 31537 } 31538 x1 := r1.Args[0] 31539 if x1.Op != OpAMD64MOVWloadidx1 { 31540 break 31541 } 31542 i1 := x1.AuxInt 31543 if x1.Aux != s { 31544 break 31545 } 31546 _ = x1.Args[2] 31547 if idx != x1.Args[0] { 31548 break 31549 } 31550 if p != x1.Args[1] { 31551 break 31552 } 31553 if mem != x1.Args[2] { 31554 break 31555 } 31556 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31557 break 31558 } 31559 b = mergePoint(b, x0, x1) 31560 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31561 v.reset(OpCopy) 31562 v.AddArg(v0) 31563 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31564 v1.AuxInt = j1 31565 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31566 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31567 v3.AuxInt = i0 31568 v3.Aux = s 31569 v3.AddArg(p) 31570 v3.AddArg(idx) 31571 v3.AddArg(mem) 31572 v2.AddArg(v3) 31573 v1.AddArg(v2) 31574 v0.AddArg(v1) 31575 v0.AddArg(y) 31576 return true 31577 } 31578 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31579 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && 
r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31580 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31581 for { 31582 _ = v.Args[1] 31583 or := v.Args[0] 31584 if or.Op != OpAMD64ORQ { 31585 break 31586 } 31587 _ = or.Args[1] 31588 s1 := or.Args[0] 31589 if s1.Op != OpAMD64SHLQconst { 31590 break 31591 } 31592 j1 := s1.AuxInt 31593 r1 := s1.Args[0] 31594 if r1.Op != OpAMD64ROLWconst { 31595 break 31596 } 31597 if r1.AuxInt != 8 { 31598 break 31599 } 31600 x1 := r1.Args[0] 31601 if x1.Op != OpAMD64MOVWloadidx1 { 31602 break 31603 } 31604 i1 := x1.AuxInt 31605 s := x1.Aux 31606 _ = x1.Args[2] 31607 p := x1.Args[0] 31608 idx := x1.Args[1] 31609 mem := x1.Args[2] 31610 y := or.Args[1] 31611 s0 := v.Args[1] 31612 if s0.Op != OpAMD64SHLQconst { 31613 break 31614 } 31615 j0 := s0.AuxInt 31616 r0 := s0.Args[0] 31617 if r0.Op != OpAMD64ROLWconst { 31618 break 31619 } 31620 if r0.AuxInt != 8 { 31621 break 31622 } 31623 x0 := r0.Args[0] 31624 if x0.Op != OpAMD64MOVWloadidx1 { 31625 break 31626 } 31627 i0 := x0.AuxInt 31628 if x0.Aux != s { 31629 break 31630 } 31631 _ = x0.Args[2] 31632 if p != x0.Args[0] { 31633 break 31634 } 31635 if idx != x0.Args[1] { 31636 break 31637 } 31638 if mem != x0.Args[2] { 31639 break 31640 } 31641 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31642 break 31643 } 31644 b = mergePoint(b, x0, x1) 31645 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31646 v.reset(OpCopy) 31647 v.AddArg(v0) 31648 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31649 v1.AuxInt = j1 31650 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31651 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31652 v3.AuxInt = i0 31653 v3.Aux = s 31654 v3.AddArg(p) 31655 v3.AddArg(idx) 31656 v3.AddArg(mem) 31657 v2.AddArg(v3) 31658 v1.AddArg(v2) 31659 v0.AddArg(v1) 31660 v0.AddArg(y) 31661 return true 31662 } 31663 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31664 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31665 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31666 for { 31667 _ = v.Args[1] 31668 or := v.Args[0] 31669 if or.Op != OpAMD64ORQ { 31670 break 31671 } 31672 _ = or.Args[1] 31673 s1 := or.Args[0] 31674 if s1.Op != OpAMD64SHLQconst { 31675 break 31676 } 31677 j1 := s1.AuxInt 31678 r1 := s1.Args[0] 31679 if r1.Op != OpAMD64ROLWconst { 31680 break 31681 } 31682 if r1.AuxInt != 8 { 31683 break 31684 } 31685 x1 := r1.Args[0] 31686 if x1.Op != OpAMD64MOVWloadidx1 { 31687 break 31688 } 31689 i1 := x1.AuxInt 31690 s := x1.Aux 31691 _ = x1.Args[2] 31692 idx := x1.Args[0] 31693 p := x1.Args[1] 31694 mem := x1.Args[2] 31695 y := or.Args[1] 31696 s0 := v.Args[1] 
31697 if s0.Op != OpAMD64SHLQconst { 31698 break 31699 } 31700 j0 := s0.AuxInt 31701 r0 := s0.Args[0] 31702 if r0.Op != OpAMD64ROLWconst { 31703 break 31704 } 31705 if r0.AuxInt != 8 { 31706 break 31707 } 31708 x0 := r0.Args[0] 31709 if x0.Op != OpAMD64MOVWloadidx1 { 31710 break 31711 } 31712 i0 := x0.AuxInt 31713 if x0.Aux != s { 31714 break 31715 } 31716 _ = x0.Args[2] 31717 if p != x0.Args[0] { 31718 break 31719 } 31720 if idx != x0.Args[1] { 31721 break 31722 } 31723 if mem != x0.Args[2] { 31724 break 31725 } 31726 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31727 break 31728 } 31729 b = mergePoint(b, x0, x1) 31730 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31731 v.reset(OpCopy) 31732 v.AddArg(v0) 31733 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31734 v1.AuxInt = j1 31735 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31736 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31737 v3.AuxInt = i0 31738 v3.Aux = s 31739 v3.AddArg(p) 31740 v3.AddArg(idx) 31741 v3.AddArg(mem) 31742 v2.AddArg(v3) 31743 v1.AddArg(v2) 31744 v0.AddArg(v1) 31745 v0.AddArg(y) 31746 return true 31747 } 31748 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31749 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31750 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31751 for { 31752 _ = v.Args[1] 31753 or := v.Args[0] 31754 if or.Op != OpAMD64ORQ { 31755 break 31756 } 31757 _ = or.Args[1] 31758 y := or.Args[0] 31759 s1 := or.Args[1] 31760 if s1.Op != OpAMD64SHLQconst { 31761 break 31762 } 31763 j1 := s1.AuxInt 31764 r1 := s1.Args[0] 31765 if r1.Op != OpAMD64ROLWconst { 31766 break 31767 } 31768 if r1.AuxInt != 8 { 31769 break 31770 } 31771 x1 := r1.Args[0] 31772 if x1.Op != OpAMD64MOVWloadidx1 { 31773 break 31774 } 31775 i1 := x1.AuxInt 31776 s := x1.Aux 31777 _ = x1.Args[2] 31778 p := x1.Args[0] 31779 idx := x1.Args[1] 31780 mem := x1.Args[2] 31781 s0 := v.Args[1] 31782 if s0.Op != OpAMD64SHLQconst { 31783 break 31784 } 31785 j0 := s0.AuxInt 31786 r0 := s0.Args[0] 31787 if r0.Op != OpAMD64ROLWconst { 31788 break 31789 } 31790 if r0.AuxInt != 8 { 31791 break 31792 } 31793 x0 := r0.Args[0] 31794 if x0.Op != OpAMD64MOVWloadidx1 { 31795 break 31796 } 31797 i0 := x0.AuxInt 31798 if x0.Aux != s { 31799 break 31800 } 31801 _ = x0.Args[2] 31802 if p != x0.Args[0] { 31803 break 31804 } 31805 if idx != x0.Args[1] { 31806 break 31807 } 31808 if mem != x0.Args[2] { 31809 break 31810 } 31811 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31812 break 31813 } 31814 b = mergePoint(b, x0, x1) 31815 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31816 v.reset(OpCopy) 
31817 v.AddArg(v0) 31818 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31819 v1.AuxInt = j1 31820 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31821 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31822 v3.AuxInt = i0 31823 v3.Aux = s 31824 v3.AddArg(p) 31825 v3.AddArg(idx) 31826 v3.AddArg(mem) 31827 v2.AddArg(v3) 31828 v1.AddArg(v2) 31829 v0.AddArg(v1) 31830 v0.AddArg(y) 31831 return true 31832 } 31833 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31834 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31835 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31836 for { 31837 _ = v.Args[1] 31838 or := v.Args[0] 31839 if or.Op != OpAMD64ORQ { 31840 break 31841 } 31842 _ = or.Args[1] 31843 y := or.Args[0] 31844 s1 := or.Args[1] 31845 if s1.Op != OpAMD64SHLQconst { 31846 break 31847 } 31848 j1 := s1.AuxInt 31849 r1 := s1.Args[0] 31850 if r1.Op != OpAMD64ROLWconst { 31851 break 31852 } 31853 if r1.AuxInt != 8 { 31854 break 31855 } 31856 x1 := r1.Args[0] 31857 if x1.Op != OpAMD64MOVWloadidx1 { 31858 break 31859 } 31860 i1 := x1.AuxInt 31861 s := x1.Aux 31862 _ = x1.Args[2] 31863 idx := x1.Args[0] 31864 p := x1.Args[1] 31865 mem := x1.Args[2] 31866 s0 := v.Args[1] 31867 if s0.Op != OpAMD64SHLQconst { 31868 break 31869 } 31870 j0 := s0.AuxInt 31871 r0 := s0.Args[0] 31872 if r0.Op != OpAMD64ROLWconst { 31873 break 31874 } 31875 if r0.AuxInt != 8 { 31876 break 31877 } 31878 x0 := r0.Args[0] 31879 if x0.Op != OpAMD64MOVWloadidx1 { 31880 break 31881 } 31882 i0 := x0.AuxInt 31883 if x0.Aux != s { 31884 break 31885 } 31886 _ = x0.Args[2] 31887 if p != x0.Args[0] { 31888 break 31889 } 31890 if idx != x0.Args[1] { 31891 break 31892 } 31893 if mem != x0.Args[2] { 31894 break 31895 } 31896 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31897 break 31898 } 31899 b = mergePoint(b, x0, x1) 31900 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31901 v.reset(OpCopy) 31902 v.AddArg(v0) 31903 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31904 v1.AuxInt = j1 31905 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31906 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31907 v3.AuxInt = i0 31908 v3.Aux = s 31909 v3.AddArg(p) 31910 v3.AddArg(idx) 31911 v3.AddArg(mem) 31912 v2.AddArg(v3) 31913 v1.AddArg(v2) 31914 v0.AddArg(v1) 31915 v0.AddArg(y) 31916 return true 31917 } 31918 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 31919 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31920 // result: 
@mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31921 for { 31922 _ = v.Args[1] 31923 or := v.Args[0] 31924 if or.Op != OpAMD64ORQ { 31925 break 31926 } 31927 _ = or.Args[1] 31928 s1 := or.Args[0] 31929 if s1.Op != OpAMD64SHLQconst { 31930 break 31931 } 31932 j1 := s1.AuxInt 31933 r1 := s1.Args[0] 31934 if r1.Op != OpAMD64ROLWconst { 31935 break 31936 } 31937 if r1.AuxInt != 8 { 31938 break 31939 } 31940 x1 := r1.Args[0] 31941 if x1.Op != OpAMD64MOVWloadidx1 { 31942 break 31943 } 31944 i1 := x1.AuxInt 31945 s := x1.Aux 31946 _ = x1.Args[2] 31947 p := x1.Args[0] 31948 idx := x1.Args[1] 31949 mem := x1.Args[2] 31950 y := or.Args[1] 31951 s0 := v.Args[1] 31952 if s0.Op != OpAMD64SHLQconst { 31953 break 31954 } 31955 j0 := s0.AuxInt 31956 r0 := s0.Args[0] 31957 if r0.Op != OpAMD64ROLWconst { 31958 break 31959 } 31960 if r0.AuxInt != 8 { 31961 break 31962 } 31963 x0 := r0.Args[0] 31964 if x0.Op != OpAMD64MOVWloadidx1 { 31965 break 31966 } 31967 i0 := x0.AuxInt 31968 if x0.Aux != s { 31969 break 31970 } 31971 _ = x0.Args[2] 31972 if idx != x0.Args[0] { 31973 break 31974 } 31975 if p != x0.Args[1] { 31976 break 31977 } 31978 if mem != x0.Args[2] { 31979 break 31980 } 31981 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31982 break 31983 } 31984 b = mergePoint(b, x0, x1) 31985 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31986 v.reset(OpCopy) 31987 v.AddArg(v0) 31988 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31989 v1.AuxInt = j1 31990 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31991 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31992 v3.AuxInt = i0 31993 v3.Aux = s 31994 v3.AddArg(p) 31995 v3.AddArg(idx) 31996 v3.AddArg(mem) 31997 v2.AddArg(v3) 31998 v1.AddArg(v2) 31999 v0.AddArg(v1) 32000 v0.AddArg(y) 32001 return true 32002 } 32003 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 32004 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32005 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32006 for { 32007 _ = v.Args[1] 32008 or := v.Args[0] 32009 if or.Op != OpAMD64ORQ { 32010 break 32011 } 32012 _ = or.Args[1] 32013 s1 := or.Args[0] 32014 if s1.Op != OpAMD64SHLQconst { 32015 break 32016 } 32017 j1 := s1.AuxInt 32018 r1 := s1.Args[0] 32019 if r1.Op != OpAMD64ROLWconst { 32020 break 32021 } 32022 if r1.AuxInt != 8 { 32023 break 32024 } 32025 x1 := r1.Args[0] 32026 if x1.Op != OpAMD64MOVWloadidx1 { 32027 break 32028 } 32029 i1 := x1.AuxInt 32030 s := x1.Aux 32031 _ = x1.Args[2] 32032 idx := x1.Args[0] 32033 p := x1.Args[1] 32034 mem := x1.Args[2] 32035 y := or.Args[1] 32036 s0 := v.Args[1] 32037 if s0.Op != OpAMD64SHLQconst { 32038 break 32039 } 32040 j0 := s0.AuxInt 32041 r0 := s0.Args[0] 32042 if r0.Op != OpAMD64ROLWconst { 32043 break 32044 } 32045 if r0.AuxInt != 8 { 32046 break 32047 } 32048 x0 
:= r0.Args[0] 32049 if x0.Op != OpAMD64MOVWloadidx1 { 32050 break 32051 } 32052 i0 := x0.AuxInt 32053 if x0.Aux != s { 32054 break 32055 } 32056 _ = x0.Args[2] 32057 if idx != x0.Args[0] { 32058 break 32059 } 32060 if p != x0.Args[1] { 32061 break 32062 } 32063 if mem != x0.Args[2] { 32064 break 32065 } 32066 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32067 break 32068 } 32069 b = mergePoint(b, x0, x1) 32070 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32071 v.reset(OpCopy) 32072 v.AddArg(v0) 32073 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32074 v1.AuxInt = j1 32075 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32076 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32077 v3.AuxInt = i0 32078 v3.Aux = s 32079 v3.AddArg(p) 32080 v3.AddArg(idx) 32081 v3.AddArg(mem) 32082 v2.AddArg(v3) 32083 v1.AddArg(v2) 32084 v0.AddArg(v1) 32085 v0.AddArg(y) 32086 return true 32087 } 32088 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 32089 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32090 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32091 for { 32092 _ = v.Args[1] 32093 or := v.Args[0] 32094 if or.Op != OpAMD64ORQ { 32095 break 32096 } 32097 _ = or.Args[1] 32098 y := or.Args[0] 32099 s1 := or.Args[1] 32100 if s1.Op != OpAMD64SHLQconst { 32101 break 32102 } 32103 j1 := s1.AuxInt 32104 r1 := s1.Args[0] 32105 if r1.Op != OpAMD64ROLWconst { 32106 break 32107 } 32108 if r1.AuxInt != 8 { 32109 break 32110 } 32111 x1 := r1.Args[0] 32112 if x1.Op != OpAMD64MOVWloadidx1 { 32113 break 32114 } 32115 i1 := x1.AuxInt 32116 s := x1.Aux 32117 _ = x1.Args[2] 32118 p := x1.Args[0] 32119 idx := x1.Args[1] 32120 mem := x1.Args[2] 32121 s0 := v.Args[1] 32122 if s0.Op != OpAMD64SHLQconst { 32123 break 32124 } 32125 j0 := s0.AuxInt 32126 r0 := s0.Args[0] 32127 if r0.Op != OpAMD64ROLWconst { 32128 break 32129 } 32130 if r0.AuxInt != 8 { 32131 break 32132 } 32133 x0 := r0.Args[0] 32134 if x0.Op != OpAMD64MOVWloadidx1 { 32135 break 32136 } 32137 i0 := x0.AuxInt 32138 if x0.Aux != s { 32139 break 32140 } 32141 _ = x0.Args[2] 32142 if idx != x0.Args[0] { 32143 break 32144 } 32145 if p != x0.Args[1] { 32146 break 32147 } 32148 if mem != x0.Args[2] { 32149 break 32150 } 32151 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32152 break 32153 } 32154 b = mergePoint(b, x0, x1) 32155 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32156 v.reset(OpCopy) 32157 v.AddArg(v0) 32158 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32159 v1.AuxInt = j1 32160 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32161 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, 
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
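// Illustrative note (not generated from AMD64.rules): the ORQ rules above and
// below merge pairs of byte-swapped 16-bit loads from adjacent addresses into
// one byte-swapped 32-bit load. They rely on the identity
//
//	bits.ReverseBytes32(x) ==
//		uint32(bits.ReverseBytes16(uint16(x>>16))) |
//			uint32(bits.ReverseBytes16(uint16(x)))<<16
//
// where ROLWconst [8] plays the role of bits.ReverseBytes16 and BSWAPL that of
// bits.ReverseBytes32. The conditions i1 == i0+2 and j1 == j0-16 check that
// the two halves are adjacent both in memory and in the assembled result word.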
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
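// Illustrative note (not generated from AMD64.rules): the two MOVQload rules
// above fuse a load feeding an OR into a single read-modify instruction, so
// source like
//
//	x |= *p
//
// can assemble to a memory-operand ORQ instead of a separate MOVQ plus ORQ.
// canMergeLoad guards against duplicating a load whose result is still needed
// elsewhere, and clobber marks the old load value as dead.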
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
	// match: (ROLB x (NEGQ y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
	// match: (ROLL x (NEGQ y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
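// Illustrative note (not generated from AMD64.rules): the rotate rules in this
// area encode three identities, sketched here for the 32-bit case:
//
//	rotl32(x, -c) == rotr32(x, c)             // ROLL x (NEGQ y) => RORL x y
//	rotl32(rotl32(x, c), d) == rotl32(x, c+d) // ROLLconst of ROLLconst folds
//	rotl32(x, c) == rotl32(x, c&31)           // counts are taken mod the width
//
// which is why constant rotate counts are masked with &7, &15, &31, or &63
// according to the operand size.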
func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
	// match: (ROLQ x (NEGQ y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
	// match: (ROLW x (NEGQ y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
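// Illustrative note (not generated from AMD64.rules): an arithmetic right
// shift saturates once every remaining bit is a copy of the sign bit, so for
// an 8-bit operand
//
//	int8(x) >> c == int8(x) >> 7 // for any c >= 7
//
// which is, plausibly, why the SARB rules above clamp the constant count with
// min(c&31, 7) (and SARW further down with min(c&31, 15)) rather than masking
// it the way the rotate rules do.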
func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
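// Illustrative note (not generated from AMD64.rules): the ADD/AND rules on
// shift counts above exploit the fact that the hardware shift only reads the
// low 5 bits of the count (6 for 64-bit shifts). Since counts are congruent
// mod the width,
//
//	(c + 32) mod 32 == c mod 32 // ADDQconst [c] with c&31 == 0 is a no-op
//	(c & m) mod 32 == c mod 32  // ANDQconst [m] with m&31 == 31 is a no-op
//
// the wrapping ADDQconst/ANDQconst (or their 32-bit L forms) can simply be
// dropped from the count operand.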
func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARQ x (MOVQconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
	// match: (SARQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
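// Illustrative note (not generated from AMD64.rules): the
// (SARQconst [c] (MOVQconst [d])) rule folds the shift at compile time using
// Go's own arithmetic (sign-extending) right shift on the int64 AuxInt, e.g.
//
//	var d int64 = -8
//	_ = d >> 1 // -4; the sign bit is preserved
//
// which matches what SARQ would compute at run time.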
func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
	// match: (SARWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
	// match: (SBBLcarrymask (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
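// Illustrative note (not generated from AMD64.rules): SBBLcarrymask is a
// subtract-with-borrow of a register from itself, which computes 0 - CF: an
// all-ones mask when the carry flag is set, zero otherwise. A reference model,
// assuming a hypothetical boolean carry predicate:
//
//	func carrymask32(carry bool) int32 {
//		if carry {
//			return -1 // all bits set
//		}
//		return 0
//	}
//
// The rules above and below just evaluate that mask for each known flag
// constant; only the unsigned-less-than outcomes (FlagLT_ULT, FlagGT_ULT)
// set the carry.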
func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
	// match: (SBBQcarrymask (FlagEQ))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
	// match: (SETA (InvertFlags x))
	// cond:
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
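// Illustrative note (not generated from AMD64.rules): the five Flag*
// pseudo-ops record the outcome of a comparison whose operands were known at
// compile time, as a (signed, unsigned) pair: FlagEQ, FlagLT_ULT, FlagLT_UGT,
// FlagGT_ULT, FlagGT_UGT. Each SETcc rule in this region just reads off the
// bit it tests, e.g. SETA (unsigned greater) above folds as:
//
//	// FlagEQ, FlagLT_ULT, FlagGT_ULT -> (MOVLconst [0])
//	// FlagLT_UGT, FlagGT_UGT        -> (MOVLconst [1])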
func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
	// match: (SETAE (InvertFlags x))
	// cond:
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
	// match: (SETB (InvertFlags x))
	// cond:
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
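// Illustrative note (not generated from AMD64.rules): InvertFlags marks a
// comparison whose operands were swapped, so each SETcc folds to the
// condition with the operand order reversed, not to its negation:
//
//	a < b  == b > a  // SETB (InvertFlags x) => SETA x
//	a <= b == b >= a // SETBE (InvertFlags x) => SETAE x
//
// Equality is symmetric under the swap, which is why SETEQ and SETNE below
// pass through InvertFlags unchanged.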
func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
	// match: (SETBE (InvertFlags x))
	// cond:
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAE (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
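// Illustrative note (not generated from AMD64.rules): the TEST rules in SETEQ
// above recognize single-bit tests and lower them to a bit-test instruction.
// Testing bit c of x,
//
//	x&(1<<c) == 0
//
// is exactly what BT leaves in the carry flag (CF = the tested bit), so SETEQ
// of the TEST becomes SETAE (CF == 0) of the BT, and the mirror-image SETNE
// rules further down become SETB (CF == 1). The isPowerOfTwo/log2 guards
// extract the bit index from a constant mask.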
func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETB (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLL x (MOVQconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
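// Added commentary (not generated from AMD64.rules): 32-bit shifts on amd64
// use only the low five bits of the count register, so a constant count
// folds as c&31, and a constant inside the count expression can be dropped
// whenever it cannot affect those bits: c&31 == 0 for an add, c&31 == 31
// for a mask. The hypothetical helper below shows the masked form these
// rules normalize toward; note that a Go-level shift only coincides with
// the hardware behavior once the count is provably below 32.
func exampleShiftCountMask(x uint32, y uint) uint32 {
	return x << (y & 31) // the &31 is free: SHLL masks the count itself
}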
func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
	// match: (SHLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLQ x (MOVQconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
	// match: (SHLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
	// match: (SHRBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRL x (MOVQconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
	// match: (SHRLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRQ x (MOVQconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
	// match: (SHRQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
	// match: (SHRWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
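// Added commentary (not generated from AMD64.rules): SHRB and SHRW shift
// 8- and 16-bit subwords, so a constant count only folds into
// SHRBconst/SHRWconst while it is below the operand width; once c&31
// reaches the width, every bit is shifted out and the value is the
// constant 0, which the MOVLconst [0] rules above encode directly.
func exampleByteShiftIsZero(x uint8) uint8 {
	return x >> 8 // always 0; shifts like this fold to a constant
}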
func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBL x (MOVLconst [c]))
	// cond:
	// result: (SUBLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// cond:
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
	// match: (SUBLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// cond:
	// result: (ADDLconst [int64(int32(-c))] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(-c))
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool {
	// match: (SUBQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// cond:
	// result: (MOVQconst [d-c])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d - c
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(-c-d)
	// result: (ADDQconst [-c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(-c - d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c - d
		v.AddArg(x)
		return true
	}
	return false
}
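// Added commentary (not generated from AMD64.rules): subtracting a constant
// is normalized to adding its negation so later ADDQconst/LEAQ peepholes
// can combine offsets. The 64-bit rule carries the guard c != -(1<<31)
// because AuxInt immediates must stay in int32 range and negating -1<<31
// would overflow it; the 32-bit rule instead wraps the negation through
// int64(int32(-c)). A hypothetical source-level shape:
func exampleSubToAdd(x int64) int64 {
	return x - 7 // normalized to the equivalent of (ADDQconst [-7] x)
}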
func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
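// Added commentary (not generated from AMD64.rules): the SUBSDmem/SUBSSmem
// rules fold a floating-point load into the subtract, producing one
// instruction with a memory operand instead of a load plus a register
// SUBSD/SUBSS. canMergeLoad checks that the load can legally be evaluated
// at the use, and clobber(l) marks the now-absorbed load as dead.
func exampleFoldedLoad(x float64, p *float64) float64 {
	return x - *p // candidate for a SUBSDmem with the load folded in
}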
func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
	// match: (TESTB (MOVLconst [c]) x)
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTB x (MOVLconst [c]))
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
	// match: (TESTL (MOVLconst [c]) x)
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTL x (MOVLconst [c]))
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
	// match: (TESTW (MOVLconst [c]) x)
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTW x (MOVLconst [c]))
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
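// Added commentary (not generated from AMD64.rules): TEST is commutative,
// so each function above matches the constant in either operand position.
// Only TESTQ carries an is32Bit(c) guard: the x86-64 TEST immediate is at
// most 32 bits (sign-extended), so wider constants must stay in a register.
func exampleTestConst(x int64) bool {
	return x&255 != 0 // the AND+compare can lower to a TESTQconst-style check
}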
func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
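// Added commentary (not generated from AMD64.rules): the XADD/XCHG rules
// fold only addressing arithmetic. An ADDQconst or LEAQ feeding the
// pointer moves into the instruction's displacement when the combined
// offset stays in 32 bits and, for LEAQ, the symbols merge and the base
// is not SB; the atomicity of the lock-prefixed op itself is untouched.
func exampleAtomicAddrFold(p *[4]uint32) *uint32 {
	return &p[2] // the constant +8 can fold into an atomic op using it
}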
func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
	// match: (XORL x (MOVLconst [c]))
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
	// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
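// Added commentary (not generated from AMD64.rules): XORing a left shift
// and a right shift of the same value with complementary counts is a
// rotation, so the pairs above fuse into ROLLconst/ROLWconst/ROLBconst.
// The 16- and 8-bit forms also check t.Size() so the rewrite only fires
// when no wider bits of the result are observable.
func exampleRotate(x uint32) uint32 {
	return x<<5 ^ x>>27 // complementary counts: becomes (ROLLconst [5] x)
}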
func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
	// match: (XORLconst [1] (SETNE x))
	// cond:
	// result: (SETEQ x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// cond:
	// result: (SETNE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// cond:
	// result: (SETGE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// cond:
	// result: (SETL x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// cond:
	// result: (SETG x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// cond:
	// result: (SETLE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// cond:
	// result: (SETAE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// cond:
	// result: (SETB x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// cond:
	// result: (SETA x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// cond:
	// result: (SETBE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
	// match: (XORLconst [c] (XORLconst [d] x))
	// cond:
	// result: (XORLconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
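// Added commentary (not generated from AMD64.rules): a SETcc yields exactly
// 0 or 1, so XOR with 1 is boolean negation, and each rule above replaces
// the XOR by the opposite condition (SETNE<->SETEQ, SETL<->SETGE,
// SETB<->SETAE, and so on), saving the XORL entirely.
func exampleNegatedCompare(a, b int32) bool {
	return !(a < b) // can be emitted as a single SETGE rather than SETL+XOR
}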
func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
	// match: (XORQconst [c] (XORQconst [d] x))
	// cond:
	// result: (XORQconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
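// Added commentary (not generated from AMD64.rules): x^x is the constant 0
// (the classic xor-zeroing idiom), and as with XORL, a MOVQload feeding
// either operand of XORQ can be absorbed into XORQmem once canMergeLoad
// and clobber approve.
func exampleXorSelf(x uint64) uint64 {
	return x ^ x // folds to (MOVQconst [0])
}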
func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
	// match: (Add16 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
	// match: (Add32 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
	// match: (Add32F x y)
	// cond:
	// result: (ADDSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
	// match: (Add64 x y)
	// cond:
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
	// match: (Add64F x y)
	// cond:
	// result: (ADDSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
	// match: (Add8 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AddPtr x y)
	// cond: config.PtrSize == 8
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (AddPtr x y)
	// cond: config.PtrSize == 4
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
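// Added commentary (not generated from AMD64.rules): from here on the
// functions lower generic SSA ops to machine ops. amd64 has no separate
// 8/16-bit add in this lowering: Add8 and Add16 both become ADDL, since
// only the low bits of the result are observed. Pointer-sized ops choose
// between Q and L forms via config.PtrSize (4 on amd64p32).
func exampleNarrowAdd(a, b uint16) uint16 {
	return a + b // lowered through (ADDL a b); high bits are ignored
}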
func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AddPtr x y)
	// cond: config.PtrSize == 8
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (AddPtr x y)
	// cond: config.PtrSize == 4
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
	// match: (And64 x y)
	// cond:
	// result: (ANDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB_0(v *Value) bool {
	// match: (AndB x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd32 ptr val mem)
	// cond:
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst32)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd64 ptr val mem)
	// cond:
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst64)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
	// match: (AtomicAnd8 ptr val mem)
	// cond:
	// result: (ANDBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ANDBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
	// match: (AtomicExchange32 ptr val mem)
	// cond:
	// result: (XCHGL val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGL)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
	// match: (AtomicExchange64 ptr val mem)
	// cond:
	// result: (XCHGQ val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGQ)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
	// match: (AtomicLoad32 ptr mem)
	// cond:
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
	// match: (AtomicLoad64 ptr mem)
	// cond:
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 8
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 4
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
	// match: (AtomicOr8 ptr val mem)
	// cond:
	// result: (ORBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ORBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
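// NOTE: the atomic stores below lower to XCHG rather than a plain MOV store:
// an x86-64 XCHG with a memory operand is implicitly locked, which gives the
// sequentially consistent ordering atomic stores require. The op is modeled
// as a tuple, and Select1 extracts the memory result. (Explanatory note
// inferred from the lowering; not part of the generated rules.)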
func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore32 ptr val mem)
	// cond:
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore64 ptr val mem)
	// cond:
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 8
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 4
	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
	// match: (Avg64u x y)
	// cond:
	// result: (AVGQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen32 x)
	// cond:
	// result: (BitLen64 (MOVLQZX <typ.UInt64> x))
	for {
		x := v.Args[0]
		v.reset(OpBitLen64)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen64 <t> x)
	// cond:
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
	// match: (Bswap32 x)
	// cond:
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
	// match: (Bswap64 x)
	// cond:
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
	// match: (ClosureCall [argwid] entry closure mem)
	// cond:
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[2]
		entry := v.Args[0]
		closure := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16_0(v *Value) bool {
	// match: (Com16 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32_0(v *Value) bool {
	// match: (Com32 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64_0(v *Value) bool {
	// match: (Com64 x)
	// cond:
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8_0(v *Value) bool {
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32_0(v *Value) bool {
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
	// match: (Const32F [val])
	// cond:
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64_0(v *Value) bool {
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
	// match: (Const64F [val])
	// cond:
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8_0(v *Value) bool {
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
	// match: (ConstBool [b])
	// cond:
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ConstNil)
	// cond: config.PtrSize == 8
	// result: (MOVQconst [0])
	for {
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ConstNil)
	// cond: config.PtrSize == 4
	// result: (MOVLconst [0])
	for {
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpConvert_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 8
	// result: (MOVQconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 4
	// result: (MOVLconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
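// NOTE: BSF leaves its destination undefined when the source is zero, so the
// Ctz lowerings below must guard that case: Ctz64 selects the constant 64
// with CMOVQEQ when the BSFQ flag result reports a zero input, and Ctz32 ORs
// in 1<<32 so the 64-bit source is never zero and bit 32 is found for a zero
// 32-bit input. (Explanatory note; rationale inferred, not generated.)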
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz32 x)
	// cond:
	// result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = 1 << 32
		v1.AddArg(v2)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
	// match: (Cvt32Fto32 x)
	// cond:
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
	// match: (Cvt32Fto64 x)
	// cond:
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
	// match: (Cvt32Fto64F x)
	// cond:
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
	// match: (Cvt32to32F x)
	// cond:
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
	// match: (Cvt32to64F x)
	// cond:
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
	// match: (Cvt64Fto32 x)
	// cond:
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
	// match: (Cvt64Fto32F x)
	// cond:
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
	// match: (Cvt64Fto64 x)
	// cond:
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
	// match: (Cvt64to32F x)
	// cond:
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
	// match: (Cvt64to64F x)
	// cond:
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		_ = v.Args[2]
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
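// NOTE: the x86 divide instructions produce the quotient and remainder
// together, so DIVW/DIVL/DIVQ and their unsigned variants are modeled as
// tuple-valued ops: the Div lowerings below take Select0 (the quotient) and
// the Mod lowerings further down take Select1 (the remainder). (Explanatory
// note describing the pattern in this file; not part of the generated rules.)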
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16 x y)
	// cond:
	// result: (Select0 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32 x y)
	// cond:
	// result: (Select0 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64 x y)
	// cond:
	// result: (Select0 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
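// NOTE: 8-bit division is widened below: the operands are sign- or
// zero-extended to 16 bits and the 16-bit divide is reused, so no separate
// byte-sized divide lowering is needed. (Explanatory note; inferred from the
// rules, not part of the generated output.)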
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
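// NOTE: the integer comparisons above and below lower uniformly to a
// flag-producing CMPB/CMPW/CMPL/CMPQ feeding a SETcc: signed orderings use
// SETL/SETLE/SETG/SETGE, unsigned orderings use SETB/SETBE/SETA/SETAE.
// (Explanatory note describing the visible pattern; not generated text.)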
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[1]
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETB (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETB (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETBE (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETBE (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
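// NOTE: in the floating-point orderings above, x < y and x <= y are computed
// as y > x and y >= x (observe the swapped UCOMISS/UCOMISD operands) using
// the SETGF/SETGEF forms, which appears to be so that an unordered comparison
// involving NaN yields false. (Explanatory note; rationale inferred, not part
// of the generated rules.)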
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
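// NOTE: x86 shift instructions mask the shift count, so the Lsh lowerings
// below AND the shifted value with an SBBLcarrymask/SBBQcarrymask built from
// a CMPxconst on the count: when the count is at least the compared width
// (32 or 64), the mask is all zeros and the whole result collapses to zero,
// matching Go's shift semantics. (Explanatory note; rationale inferred from
// gen/AMD64.rules, not part of the generated output.)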
rewriteValueAMD64_OpLsh16x8_0(v *Value) bool { 39947 b := v.Block 39948 _ = b 39949 // match: (Lsh16x8 <t> x y) 39950 // cond: 39951 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 39952 for { 39953 t := v.Type 39954 _ = v.Args[1] 39955 x := v.Args[0] 39956 y := v.Args[1] 39957 v.reset(OpAMD64ANDL) 39958 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 39959 v0.AddArg(x) 39960 v0.AddArg(y) 39961 v.AddArg(v0) 39962 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 39963 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 39964 v2.AuxInt = 32 39965 v2.AddArg(y) 39966 v1.AddArg(v2) 39967 v.AddArg(v1) 39968 return true 39969 } 39970 } 39971 func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool { 39972 b := v.Block 39973 _ = b 39974 // match: (Lsh32x16 <t> x y) 39975 // cond: 39976 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 39977 for { 39978 t := v.Type 39979 _ = v.Args[1] 39980 x := v.Args[0] 39981 y := v.Args[1] 39982 v.reset(OpAMD64ANDL) 39983 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 39984 v0.AddArg(x) 39985 v0.AddArg(y) 39986 v.AddArg(v0) 39987 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 39988 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 39989 v2.AuxInt = 32 39990 v2.AddArg(y) 39991 v1.AddArg(v2) 39992 v.AddArg(v1) 39993 return true 39994 } 39995 } 39996 func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool { 39997 b := v.Block 39998 _ = b 39999 // match: (Lsh32x32 <t> x y) 40000 // cond: 40001 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 40002 for { 40003 t := v.Type 40004 _ = v.Args[1] 40005 x := v.Args[0] 40006 y := v.Args[1] 40007 v.reset(OpAMD64ANDL) 40008 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40009 v0.AddArg(x) 40010 v0.AddArg(y) 40011 v.AddArg(v0) 40012 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40013 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 40014 v2.AuxInt = 32 40015 v2.AddArg(y) 40016 v1.AddArg(v2) 40017 v.AddArg(v1) 40018 return true 40019 } 40020 } 40021 func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool { 40022 b := v.Block 40023 _ = b 40024 // match: (Lsh32x64 <t> x y) 40025 // cond: 40026 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 40027 for { 40028 t := v.Type 40029 _ = v.Args[1] 40030 x := v.Args[0] 40031 y := v.Args[1] 40032 v.reset(OpAMD64ANDL) 40033 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40034 v0.AddArg(x) 40035 v0.AddArg(y) 40036 v.AddArg(v0) 40037 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40038 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 40039 v2.AuxInt = 32 40040 v2.AddArg(y) 40041 v1.AddArg(v2) 40042 v.AddArg(v1) 40043 return true 40044 } 40045 } 40046 func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool { 40047 b := v.Block 40048 _ = b 40049 // match: (Lsh32x8 <t> x y) 40050 // cond: 40051 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 40052 for { 40053 t := v.Type 40054 _ = v.Args[1] 40055 x := v.Args[0] 40056 y := v.Args[1] 40057 v.reset(OpAMD64ANDL) 40058 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40059 v0.AddArg(x) 40060 v0.AddArg(y) 40061 v.AddArg(v0) 40062 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40063 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 40064 v2.AuxInt = 32 40065 v2.AddArg(y) 40066 v1.AddArg(v2) 40067 v.AddArg(v1) 40068 return true 40069 } 40070 } 40071 func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool { 40072 b := v.Block 40073 _ = b 40074 // match: (Lsh64x16 <t> x y) 40075 // cond: 40076 // result: (ANDQ (SHLQ <t> x y) 
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
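// The Mod rules lower % to the hardware divide. DIVW/DIVL/DIVQ (and their
// unsigned U forms) are modeled as returning a (quotient, remainder)
// tuple, so a modulus is just Select1 of the divide. Mod8/Mod8u take no
// 8-bit form here; they widen both operands to 16 bits first (sign- or
// zero-extending to match signedness), presumably because the 8-bit x86
// divide leaves its remainder in AH, which is awkward to model.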
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16 x y)
	// cond:
	// result: (Select1 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32 x y)
	// cond:
	// result: (Select1 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64 x y)
	// cond:
	// result: (Select1 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
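// Move is lowered by size. Small fixed sizes become straight-line
// load/store pairs, with the odd sizes 3, 5, 6, and 7 split into two
// moves whose ranges may overlap; see rewriteValueAMD64_OpMove_10 below
// for the medium and large cases, which fall back to Duff's device or
// REP MOVSQ.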
func rewriteValueAMD64_OpMove_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [0] _ _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[2]
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// cond:
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// cond:
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// cond:
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// cond:
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond:
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
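// The remaining Move cases handle sizes the fixed-size rules above do not.
// Sizes in (8,16) use two overlapping 8-byte moves. Sizes above 16 that
// are not a multiple of 16 first copy the ragged s%16-byte prefix, then
// recurse on the 16-aligned remainder via OffPtr. Multiples of 16 up to
// 16*64 bytes jump into DUFFCOPY; the AuxInt 14*(64-s/16) is the entry
// offset chosen so that exactly s/16 of the routine's 16-byte iterations
// execute, each iteration apparently occupying 14 bytes of code. Anything
// larger (or any multiple of 8 when Duff's device is disabled) becomes
// REP MOVSQ with an explicit quadword count.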
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
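// Mul8, Mul16, and Mul32 all lower to the 32-bit MULL: the low 32 bits of
// a product depend only on the low 32 bits of the operands, so the
// narrower multiplies come out correct after truncation and no dedicated
// 8- or 16-bit multiply is needed.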
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
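// Mul64uhilo is the full 64x64->128-bit unsigned multiply; MULQU2 is
// modeled as producing the high and low halves as a tuple, which callers
// pick apart with Select0/Select1.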
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
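// Float negation above has no dedicated instruction; Neg32F and Neg64F
// flip the sign bit by XORing with a constant whose only set bit is the
// sign bit. f2i(math.Copysign(0, -1)) is the bit pattern of -0.0, so
// x ^ -0.0 negates x, including for zeros, infinities, and NaNs.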
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
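// OffPtr picks the cheapest form of pointer-plus-constant: an ADDQconst
// when the offset fits the instruction's signed 32-bit immediate,
// otherwise a separate MOVQconst feeding a register-register ADDQ. The
// ADDLconst case covers 32-bit-pointer configurations.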
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
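// PopCount16 and PopCount8 zero-extend to 32 bits and use the 32-bit
// POPCNTL rather than a narrower form: there is no 8-bit POPCNT at all,
// and widening the 16-bit case sidesteps the 16-bit operand form (likely
// to avoid partial-register issues).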
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
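// Signed right shifts cannot simply be masked to zero for large counts;
// Go requires them to keep filling with the sign bit. The rules below
// therefore clamp the count instead: NOT of SBBcarrymask is 0 while the
// count is in range and ^0 otherwise, so ORing it into y leaves small
// counts alone and turns out-of-range counts into an all-ones count,
// which the hardware SAR (whose count is masked) treats as a shift by
// width-1, i.e. all sign bits. Roughly, for the 16-bit case:
//
//	x >> (y < 16 ? y : 31)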
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
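// The 64-bit shifts below follow the same two patterns with Q-sized ops
// (SHRQ/SARQ, ANDQ, SBBQcarrymask) and a bound of 64. Note that the
// clamp/mask helpers are sized by the count's type, not the value's: a
// 64-bit value shifted by a 16-bit count still uses ORL/NOTL on y.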
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x16 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x32 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x64 <t> x y)
	// cond:
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
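// AddTupleFirst32/64 mark a tuple (typically from a lowered atomic add,
// where XADD returns the old value) whose first element still needs val
// added to it. Select0 of such a tuple becomes an explicit ADD of val to
// the tuple's first result; Select1, the memory half, is unaffected and
// simply selects from the underlying tuple.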
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// cond:
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// cond:
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
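// Slicemask produces 0 when its argument is 0 and ^0 when it is positive
// (the argument is a slice length or capacity, so never negative here).
// NEGQ makes any positive input negative, and the arithmetic shift right
// by 63 then smears the resulting sign bit across the whole register.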
rewriteValueAMD64_OpStaticCall_0(v *Value) bool { 42410 // match: (StaticCall [argwid] {target} mem) 42411 // cond: 42412 // result: (CALLstatic [argwid] {target} mem) 42413 for { 42414 argwid := v.AuxInt 42415 target := v.Aux 42416 mem := v.Args[0] 42417 v.reset(OpAMD64CALLstatic) 42418 v.AuxInt = argwid 42419 v.Aux = target 42420 v.AddArg(mem) 42421 return true 42422 } 42423 } 42424 func rewriteValueAMD64_OpStore_0(v *Value) bool { 42425 // match: (Store {t} ptr val mem) 42426 // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) 42427 // result: (MOVSDstore ptr val mem) 42428 for { 42429 t := v.Aux 42430 _ = v.Args[2] 42431 ptr := v.Args[0] 42432 val := v.Args[1] 42433 mem := v.Args[2] 42434 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { 42435 break 42436 } 42437 v.reset(OpAMD64MOVSDstore) 42438 v.AddArg(ptr) 42439 v.AddArg(val) 42440 v.AddArg(mem) 42441 return true 42442 } 42443 // match: (Store {t} ptr val mem) 42444 // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) 42445 // result: (MOVSSstore ptr val mem) 42446 for { 42447 t := v.Aux 42448 _ = v.Args[2] 42449 ptr := v.Args[0] 42450 val := v.Args[1] 42451 mem := v.Args[2] 42452 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { 42453 break 42454 } 42455 v.reset(OpAMD64MOVSSstore) 42456 v.AddArg(ptr) 42457 v.AddArg(val) 42458 v.AddArg(mem) 42459 return true 42460 } 42461 // match: (Store {t} ptr val mem) 42462 // cond: t.(*types.Type).Size() == 8 42463 // result: (MOVQstore ptr val mem) 42464 for { 42465 t := v.Aux 42466 _ = v.Args[2] 42467 ptr := v.Args[0] 42468 val := v.Args[1] 42469 mem := v.Args[2] 42470 if !(t.(*types.Type).Size() == 8) { 42471 break 42472 } 42473 v.reset(OpAMD64MOVQstore) 42474 v.AddArg(ptr) 42475 v.AddArg(val) 42476 v.AddArg(mem) 42477 return true 42478 } 42479 // match: (Store {t} ptr val mem) 42480 // cond: t.(*types.Type).Size() == 4 42481 // result: (MOVLstore ptr val mem) 42482 for { 42483 t := v.Aux 42484 _ = v.Args[2] 42485 ptr := v.Args[0] 42486 val := v.Args[1] 42487 mem := v.Args[2] 42488 if !(t.(*types.Type).Size() == 4) { 42489 break 42490 } 42491 v.reset(OpAMD64MOVLstore) 42492 v.AddArg(ptr) 42493 v.AddArg(val) 42494 v.AddArg(mem) 42495 return true 42496 } 42497 // match: (Store {t} ptr val mem) 42498 // cond: t.(*types.Type).Size() == 2 42499 // result: (MOVWstore ptr val mem) 42500 for { 42501 t := v.Aux 42502 _ = v.Args[2] 42503 ptr := v.Args[0] 42504 val := v.Args[1] 42505 mem := v.Args[2] 42506 if !(t.(*types.Type).Size() == 2) { 42507 break 42508 } 42509 v.reset(OpAMD64MOVWstore) 42510 v.AddArg(ptr) 42511 v.AddArg(val) 42512 v.AddArg(mem) 42513 return true 42514 } 42515 // match: (Store {t} ptr val mem) 42516 // cond: t.(*types.Type).Size() == 1 42517 // result: (MOVBstore ptr val mem) 42518 for { 42519 t := v.Aux 42520 _ = v.Args[2] 42521 ptr := v.Args[0] 42522 val := v.Args[1] 42523 mem := v.Args[2] 42524 if !(t.(*types.Type).Size() == 1) { 42525 break 42526 } 42527 v.reset(OpAMD64MOVBstore) 42528 v.AddArg(ptr) 42529 v.AddArg(val) 42530 v.AddArg(mem) 42531 return true 42532 } 42533 return false 42534 } 42535 func rewriteValueAMD64_OpSub16_0(v *Value) bool { 42536 // match: (Sub16 x y) 42537 // cond: 42538 // result: (SUBL x y) 42539 for { 42540 _ = v.Args[1] 42541 x := v.Args[0] 42542 y := v.Args[1] 42543 v.reset(OpAMD64SUBL) 42544 v.AddArg(x) 42545 v.AddArg(y) 42546 return true 42547 } 42548 } 42549 func rewriteValueAMD64_OpSub32_0(v *Value) bool { 42550 // match: (Sub32 x y) 42551 // cond: 42552 // result: (SUBL x y) 42553 for { 42554 _ = 
v.Args[1] 42555 x := v.Args[0] 42556 y := v.Args[1] 42557 v.reset(OpAMD64SUBL) 42558 v.AddArg(x) 42559 v.AddArg(y) 42560 return true 42561 } 42562 } 42563 func rewriteValueAMD64_OpSub32F_0(v *Value) bool { 42564 // match: (Sub32F x y) 42565 // cond: 42566 // result: (SUBSS x y) 42567 for { 42568 _ = v.Args[1] 42569 x := v.Args[0] 42570 y := v.Args[1] 42571 v.reset(OpAMD64SUBSS) 42572 v.AddArg(x) 42573 v.AddArg(y) 42574 return true 42575 } 42576 } 42577 func rewriteValueAMD64_OpSub64_0(v *Value) bool { 42578 // match: (Sub64 x y) 42579 // cond: 42580 // result: (SUBQ x y) 42581 for { 42582 _ = v.Args[1] 42583 x := v.Args[0] 42584 y := v.Args[1] 42585 v.reset(OpAMD64SUBQ) 42586 v.AddArg(x) 42587 v.AddArg(y) 42588 return true 42589 } 42590 } 42591 func rewriteValueAMD64_OpSub64F_0(v *Value) bool { 42592 // match: (Sub64F x y) 42593 // cond: 42594 // result: (SUBSD x y) 42595 for { 42596 _ = v.Args[1] 42597 x := v.Args[0] 42598 y := v.Args[1] 42599 v.reset(OpAMD64SUBSD) 42600 v.AddArg(x) 42601 v.AddArg(y) 42602 return true 42603 } 42604 } 42605 func rewriteValueAMD64_OpSub8_0(v *Value) bool { 42606 // match: (Sub8 x y) 42607 // cond: 42608 // result: (SUBL x y) 42609 for { 42610 _ = v.Args[1] 42611 x := v.Args[0] 42612 y := v.Args[1] 42613 v.reset(OpAMD64SUBL) 42614 v.AddArg(x) 42615 v.AddArg(y) 42616 return true 42617 } 42618 } 42619 func rewriteValueAMD64_OpSubPtr_0(v *Value) bool { 42620 b := v.Block 42621 _ = b 42622 config := b.Func.Config 42623 _ = config 42624 // match: (SubPtr x y) 42625 // cond: config.PtrSize == 8 42626 // result: (SUBQ x y) 42627 for { 42628 _ = v.Args[1] 42629 x := v.Args[0] 42630 y := v.Args[1] 42631 if !(config.PtrSize == 8) { 42632 break 42633 } 42634 v.reset(OpAMD64SUBQ) 42635 v.AddArg(x) 42636 v.AddArg(y) 42637 return true 42638 } 42639 // match: (SubPtr x y) 42640 // cond: config.PtrSize == 4 42641 // result: (SUBL x y) 42642 for { 42643 _ = v.Args[1] 42644 x := v.Args[0] 42645 y := v.Args[1] 42646 if !(config.PtrSize == 4) { 42647 break 42648 } 42649 v.reset(OpAMD64SUBL) 42650 v.AddArg(x) 42651 v.AddArg(y) 42652 return true 42653 } 42654 return false 42655 } 42656 func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool { 42657 // match: (Trunc16to8 x) 42658 // cond: 42659 // result: x 42660 for { 42661 x := v.Args[0] 42662 v.reset(OpCopy) 42663 v.Type = x.Type 42664 v.AddArg(x) 42665 return true 42666 } 42667 } 42668 func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool { 42669 // match: (Trunc32to16 x) 42670 // cond: 42671 // result: x 42672 for { 42673 x := v.Args[0] 42674 v.reset(OpCopy) 42675 v.Type = x.Type 42676 v.AddArg(x) 42677 return true 42678 } 42679 } 42680 func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool { 42681 // match: (Trunc32to8 x) 42682 // cond: 42683 // result: x 42684 for { 42685 x := v.Args[0] 42686 v.reset(OpCopy) 42687 v.Type = x.Type 42688 v.AddArg(x) 42689 return true 42690 } 42691 } 42692 func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool { 42693 // match: (Trunc64to16 x) 42694 // cond: 42695 // result: x 42696 for { 42697 x := v.Args[0] 42698 v.reset(OpCopy) 42699 v.Type = x.Type 42700 v.AddArg(x) 42701 return true 42702 } 42703 } 42704 func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool { 42705 // match: (Trunc64to32 x) 42706 // cond: 42707 // result: x 42708 for { 42709 x := v.Args[0] 42710 v.reset(OpCopy) 42711 v.Type = x.Type 42712 v.AddArg(x) 42713 return true 42714 } 42715 } 42716 func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool { 42717 // match: (Trunc64to8 x) 42718 // cond: 42719 // result: x 42720 for { 42721 x 
:= v.Args[0] 42722 v.reset(OpCopy) 42723 v.Type = x.Type 42724 v.AddArg(x) 42725 return true 42726 } 42727 } 42728 func rewriteValueAMD64_OpXor16_0(v *Value) bool { 42729 // match: (Xor16 x y) 42730 // cond: 42731 // result: (XORL x y) 42732 for { 42733 _ = v.Args[1] 42734 x := v.Args[0] 42735 y := v.Args[1] 42736 v.reset(OpAMD64XORL) 42737 v.AddArg(x) 42738 v.AddArg(y) 42739 return true 42740 } 42741 } 42742 func rewriteValueAMD64_OpXor32_0(v *Value) bool { 42743 // match: (Xor32 x y) 42744 // cond: 42745 // result: (XORL x y) 42746 for { 42747 _ = v.Args[1] 42748 x := v.Args[0] 42749 y := v.Args[1] 42750 v.reset(OpAMD64XORL) 42751 v.AddArg(x) 42752 v.AddArg(y) 42753 return true 42754 } 42755 } 42756 func rewriteValueAMD64_OpXor64_0(v *Value) bool { 42757 // match: (Xor64 x y) 42758 // cond: 42759 // result: (XORQ x y) 42760 for { 42761 _ = v.Args[1] 42762 x := v.Args[0] 42763 y := v.Args[1] 42764 v.reset(OpAMD64XORQ) 42765 v.AddArg(x) 42766 v.AddArg(y) 42767 return true 42768 } 42769 } 42770 func rewriteValueAMD64_OpXor8_0(v *Value) bool { 42771 // match: (Xor8 x y) 42772 // cond: 42773 // result: (XORL x y) 42774 for { 42775 _ = v.Args[1] 42776 x := v.Args[0] 42777 y := v.Args[1] 42778 v.reset(OpAMD64XORL) 42779 v.AddArg(x) 42780 v.AddArg(y) 42781 return true 42782 } 42783 } 42784 func rewriteValueAMD64_OpZero_0(v *Value) bool { 42785 b := v.Block 42786 _ = b 42787 // match: (Zero [0] _ mem) 42788 // cond: 42789 // result: mem 42790 for { 42791 if v.AuxInt != 0 { 42792 break 42793 } 42794 _ = v.Args[1] 42795 mem := v.Args[1] 42796 v.reset(OpCopy) 42797 v.Type = mem.Type 42798 v.AddArg(mem) 42799 return true 42800 } 42801 // match: (Zero [1] destptr mem) 42802 // cond: 42803 // result: (MOVBstoreconst [0] destptr mem) 42804 for { 42805 if v.AuxInt != 1 { 42806 break 42807 } 42808 _ = v.Args[1] 42809 destptr := v.Args[0] 42810 mem := v.Args[1] 42811 v.reset(OpAMD64MOVBstoreconst) 42812 v.AuxInt = 0 42813 v.AddArg(destptr) 42814 v.AddArg(mem) 42815 return true 42816 } 42817 // match: (Zero [2] destptr mem) 42818 // cond: 42819 // result: (MOVWstoreconst [0] destptr mem) 42820 for { 42821 if v.AuxInt != 2 { 42822 break 42823 } 42824 _ = v.Args[1] 42825 destptr := v.Args[0] 42826 mem := v.Args[1] 42827 v.reset(OpAMD64MOVWstoreconst) 42828 v.AuxInt = 0 42829 v.AddArg(destptr) 42830 v.AddArg(mem) 42831 return true 42832 } 42833 // match: (Zero [4] destptr mem) 42834 // cond: 42835 // result: (MOVLstoreconst [0] destptr mem) 42836 for { 42837 if v.AuxInt != 4 { 42838 break 42839 } 42840 _ = v.Args[1] 42841 destptr := v.Args[0] 42842 mem := v.Args[1] 42843 v.reset(OpAMD64MOVLstoreconst) 42844 v.AuxInt = 0 42845 v.AddArg(destptr) 42846 v.AddArg(mem) 42847 return true 42848 } 42849 // match: (Zero [8] destptr mem) 42850 // cond: 42851 // result: (MOVQstoreconst [0] destptr mem) 42852 for { 42853 if v.AuxInt != 8 { 42854 break 42855 } 42856 _ = v.Args[1] 42857 destptr := v.Args[0] 42858 mem := v.Args[1] 42859 v.reset(OpAMD64MOVQstoreconst) 42860 v.AuxInt = 0 42861 v.AddArg(destptr) 42862 v.AddArg(mem) 42863 return true 42864 } 42865 // match: (Zero [3] destptr mem) 42866 // cond: 42867 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 42868 for { 42869 if v.AuxInt != 3 { 42870 break 42871 } 42872 _ = v.Args[1] 42873 destptr := v.Args[0] 42874 mem := v.Args[1] 42875 v.reset(OpAMD64MOVBstoreconst) 42876 v.AuxInt = makeValAndOff(0, 2) 42877 v.AddArg(destptr) 42878 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem) 42879 v0.AuxInt = 0 42880 
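// Small odd Zero sizes are written as two adjacent or overlapping constant
// stores: a wide store at offset 0 plus a narrow store covering the tail,
// with the (value, offset) pair packed into AuxInt by makeValAndOff. For
// Zero [3], sketched at the byte level:
//
//	dst[0:2] = 0, 0 // MOVWstoreconst [0]
//	dst[2]   = 0    // MOVBstoreconst [makeValAndOff(0,2)]
//
// Zero [7] above overlaps instead (two 4-byte stores at offsets 0 and 3),
// which is harmless since both write zeros.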
v0.AddArg(destptr) 42881 v0.AddArg(mem) 42882 v.AddArg(v0) 42883 return true 42884 } 42885 // match: (Zero [5] destptr mem) 42886 // cond: 42887 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 42888 for { 42889 if v.AuxInt != 5 { 42890 break 42891 } 42892 _ = v.Args[1] 42893 destptr := v.Args[0] 42894 mem := v.Args[1] 42895 v.reset(OpAMD64MOVBstoreconst) 42896 v.AuxInt = makeValAndOff(0, 4) 42897 v.AddArg(destptr) 42898 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 42899 v0.AuxInt = 0 42900 v0.AddArg(destptr) 42901 v0.AddArg(mem) 42902 v.AddArg(v0) 42903 return true 42904 } 42905 // match: (Zero [6] destptr mem) 42906 // cond: 42907 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 42908 for { 42909 if v.AuxInt != 6 { 42910 break 42911 } 42912 _ = v.Args[1] 42913 destptr := v.Args[0] 42914 mem := v.Args[1] 42915 v.reset(OpAMD64MOVWstoreconst) 42916 v.AuxInt = makeValAndOff(0, 4) 42917 v.AddArg(destptr) 42918 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 42919 v0.AuxInt = 0 42920 v0.AddArg(destptr) 42921 v0.AddArg(mem) 42922 v.AddArg(v0) 42923 return true 42924 } 42925 // match: (Zero [7] destptr mem) 42926 // cond: 42927 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 42928 for { 42929 if v.AuxInt != 7 { 42930 break 42931 } 42932 _ = v.Args[1] 42933 destptr := v.Args[0] 42934 mem := v.Args[1] 42935 v.reset(OpAMD64MOVLstoreconst) 42936 v.AuxInt = makeValAndOff(0, 3) 42937 v.AddArg(destptr) 42938 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 42939 v0.AuxInt = 0 42940 v0.AddArg(destptr) 42941 v0.AddArg(mem) 42942 v.AddArg(v0) 42943 return true 42944 } 42945 // match: (Zero [s] destptr mem) 42946 // cond: s > 8 && s < 16 42947 // result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem)) 42948 for { 42949 s := v.AuxInt 42950 _ = v.Args[1] 42951 destptr := v.Args[0] 42952 mem := v.Args[1] 42953 if !(s > 8 && s < 16) { 42954 break 42955 } 42956 v.reset(OpAMD64MOVQstoreconst) 42957 v.AuxInt = makeValAndOff(0, s-8) 42958 v.AddArg(destptr) 42959 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 42960 v0.AuxInt = 0 42961 v0.AddArg(destptr) 42962 v0.AddArg(mem) 42963 v.AddArg(v0) 42964 return true 42965 } 42966 return false 42967 } 42968 func rewriteValueAMD64_OpZero_10(v *Value) bool { 42969 b := v.Block 42970 _ = b 42971 config := b.Func.Config 42972 _ = config 42973 typ := &b.Func.Config.Types 42974 _ = typ 42975 // match: (Zero [s] destptr mem) 42976 // cond: s%16 != 0 && s > 16 && s%16 > 8 42977 // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem)) 42978 for { 42979 s := v.AuxInt 42980 _ = v.Args[1] 42981 destptr := v.Args[0] 42982 mem := v.Args[1] 42983 if !(s%16 != 0 && s > 16 && s%16 > 8) { 42984 break 42985 } 42986 v.reset(OpZero) 42987 v.AuxInt = s - s%16 42988 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 42989 v0.AuxInt = s % 16 42990 v0.AddArg(destptr) 42991 v.AddArg(v0) 42992 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 42993 v1.AddArg(destptr) 42994 v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 42995 v2.AuxInt = 0 42996 v1.AddArg(v2) 42997 v1.AddArg(mem) 42998 v.AddArg(v1) 42999 return true 43000 } 43001 // match: (Zero [s] destptr mem) 43002 // cond: s%16 != 0 && s > 16 && s%16 <= 8 43003 // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem)) 
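// Both of these rules peel the ragged s%16 bytes off the front with a
// single store (a 16-byte MOVOstore when the remainder exceeds 8 bytes, an
// 8-byte MOVQstoreconst otherwise) and recurse on the 16-byte-aligned rest
// via OffPtr. The front store may write past the s%16 boundary, but the
// overlap is re-covered by the recursive Zero, so those bytes are simply
// zeroed twice. Shape of the recursion, with storeFront standing in for
// the MOVO/MOVQ store above:
//
//	Zero(p, s) = storeFront(p); Zero(p + s%16, s - s%16)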
43004 for { 43005 s := v.AuxInt 43006 _ = v.Args[1] 43007 destptr := v.Args[0] 43008 mem := v.Args[1] 43009 if !(s%16 != 0 && s > 16 && s%16 <= 8) { 43010 break 43011 } 43012 v.reset(OpZero) 43013 v.AuxInt = s - s%16 43014 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43015 v0.AuxInt = s % 16 43016 v0.AddArg(destptr) 43017 v.AddArg(v0) 43018 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 43019 v1.AuxInt = 0 43020 v1.AddArg(destptr) 43021 v1.AddArg(mem) 43022 v.AddArg(v1) 43023 return true 43024 } 43025 // match: (Zero [16] destptr mem) 43026 // cond: 43027 // result: (MOVOstore destptr (MOVOconst [0]) mem) 43028 for { 43029 if v.AuxInt != 16 { 43030 break 43031 } 43032 _ = v.Args[1] 43033 destptr := v.Args[0] 43034 mem := v.Args[1] 43035 v.reset(OpAMD64MOVOstore) 43036 v.AddArg(destptr) 43037 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43038 v0.AuxInt = 0 43039 v.AddArg(v0) 43040 v.AddArg(mem) 43041 return true 43042 } 43043 // match: (Zero [32] destptr mem) 43044 // cond: 43045 // result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)) 43046 for { 43047 if v.AuxInt != 32 { 43048 break 43049 } 43050 _ = v.Args[1] 43051 destptr := v.Args[0] 43052 mem := v.Args[1] 43053 v.reset(OpAMD64MOVOstore) 43054 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43055 v0.AuxInt = 16 43056 v0.AddArg(destptr) 43057 v.AddArg(v0) 43058 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43059 v1.AuxInt = 0 43060 v.AddArg(v1) 43061 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 43062 v2.AddArg(destptr) 43063 v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43064 v3.AuxInt = 0 43065 v2.AddArg(v3) 43066 v2.AddArg(mem) 43067 v.AddArg(v2) 43068 return true 43069 } 43070 // match: (Zero [48] destptr mem) 43071 // cond: 43072 // result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))) 43073 for { 43074 if v.AuxInt != 48 { 43075 break 43076 } 43077 _ = v.Args[1] 43078 destptr := v.Args[0] 43079 mem := v.Args[1] 43080 v.reset(OpAMD64MOVOstore) 43081 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43082 v0.AuxInt = 32 43083 v0.AddArg(destptr) 43084 v.AddArg(v0) 43085 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43086 v1.AuxInt = 0 43087 v.AddArg(v1) 43088 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 43089 v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43090 v3.AuxInt = 16 43091 v3.AddArg(destptr) 43092 v2.AddArg(v3) 43093 v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43094 v4.AuxInt = 0 43095 v2.AddArg(v4) 43096 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 43097 v5.AddArg(destptr) 43098 v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43099 v6.AuxInt = 0 43100 v5.AddArg(v6) 43101 v5.AddArg(mem) 43102 v2.AddArg(v5) 43103 v.AddArg(v2) 43104 return true 43105 } 43106 // match: (Zero [64] destptr mem) 43107 // cond: 43108 // result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))) 43109 for { 43110 if v.AuxInt != 64 { 43111 break 43112 } 43113 _ = v.Args[1] 43114 destptr := v.Args[0] 43115 mem := v.Args[1] 43116 v.reset(OpAMD64MOVOstore) 43117 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43118 v0.AuxInt = 48 43119 
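// Zero sizes of exactly 16, 32, 48, and 64 bytes are fully unrolled into
// 16-byte MOVOstore ops of an all-zero MOVOconst. The stores are ordered
// through their memory arguments: the store at offset 0 takes the incoming
// mem and sits innermost, and each higher-offset store wraps it, e.g. for
// Zero [32]:
//
//	(MOVOstore (OffPtr destptr [16]) (MOVOconst [0])
//	    (MOVOstore destptr (MOVOconst [0]) mem))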
v0.AddArg(destptr) 43120 v.AddArg(v0) 43121 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43122 v1.AuxInt = 0 43123 v.AddArg(v1) 43124 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 43125 v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43126 v3.AuxInt = 32 43127 v3.AddArg(destptr) 43128 v2.AddArg(v3) 43129 v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43130 v4.AuxInt = 0 43131 v2.AddArg(v4) 43132 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 43133 v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43134 v6.AuxInt = 16 43135 v6.AddArg(destptr) 43136 v5.AddArg(v6) 43137 v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43138 v7.AuxInt = 0 43139 v5.AddArg(v7) 43140 v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 43141 v8.AddArg(destptr) 43142 v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43143 v9.AuxInt = 0 43144 v8.AddArg(v9) 43145 v8.AddArg(mem) 43146 v5.AddArg(v8) 43147 v2.AddArg(v5) 43148 v.AddArg(v2) 43149 return true 43150 } 43151 // match: (Zero [s] destptr mem) 43152 // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice 43153 // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem) 43154 for { 43155 s := v.AuxInt 43156 _ = v.Args[1] 43157 destptr := v.Args[0] 43158 mem := v.Args[1] 43159 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) { 43160 break 43161 } 43162 v.reset(OpAMD64DUFFZERO) 43163 v.AuxInt = s 43164 v.AddArg(destptr) 43165 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43166 v0.AuxInt = 0 43167 v.AddArg(v0) 43168 v.AddArg(mem) 43169 return true 43170 } 43171 // match: (Zero [s] destptr mem) 43172 // cond: (s > 1024 || (config.noDuffDevice && s > 64)) && s%8 == 0 43173 // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) 43174 for { 43175 s := v.AuxInt 43176 _ = v.Args[1] 43177 destptr := v.Args[0] 43178 mem := v.Args[1] 43179 if !((s > 1024 || (config.noDuffDevice && s > 64)) && s%8 == 0) { 43180 break 43181 } 43182 v.reset(OpAMD64REPSTOSQ) 43183 v.AddArg(destptr) 43184 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 43185 v0.AuxInt = s / 8 43186 v.AddArg(v0) 43187 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 43188 v1.AuxInt = 0 43189 v.AddArg(v1) 43190 v.AddArg(mem) 43191 return true 43192 } 43193 return false 43194 } 43195 func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool { 43196 // match: (ZeroExt16to32 x) 43197 // cond: 43198 // result: (MOVWQZX x) 43199 for { 43200 x := v.Args[0] 43201 v.reset(OpAMD64MOVWQZX) 43202 v.AddArg(x) 43203 return true 43204 } 43205 } 43206 func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool { 43207 // match: (ZeroExt16to64 x) 43208 // cond: 43209 // result: (MOVWQZX x) 43210 for { 43211 x := v.Args[0] 43212 v.reset(OpAMD64MOVWQZX) 43213 v.AddArg(x) 43214 return true 43215 } 43216 } 43217 func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool { 43218 // match: (ZeroExt32to64 x) 43219 // cond: 43220 // result: (MOVLQZX x) 43221 for { 43222 x := v.Args[0] 43223 v.reset(OpAMD64MOVLQZX) 43224 v.AddArg(x) 43225 return true 43226 } 43227 } 43228 func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool { 43229 // match: (ZeroExt8to16 x) 43230 // cond: 43231 // result: (MOVBQZX x) 43232 for { 43233 x := v.Args[0] 43234 v.reset(OpAMD64MOVBQZX) 43235 v.AddArg(x) 43236 return true 43237 } 43238 } 43239 func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool { 43240 // match: (ZeroExt8to32 x) 43241 // cond: 43242 // result: (MOVBQZX x) 43243 for { 43244 x := v.Args[0] 43245 
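// All the ZeroExt*/SignExt* ops funnel into one MOVxQZX/MOVxQSX per source
// width: on AMD64 a 32-bit operation already clears the upper 32 bits of
// its destination register, so e.g. ZeroExt8to32 and ZeroExt8to64 can both
// use MOVBQZX. The Trunc* rules earlier are the mirror image: truncation is
// a no-op (OpCopy) because the low-order bits are already in place.
//
//	var b uint8 = 0xff
//	_ = uint32(b) // MOVBQZX
//	_ = uint64(b) // MOVBQZX as well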
v.reset(OpAMD64MOVBQZX) 43246 v.AddArg(x) 43247 return true 43248 } 43249 } 43250 func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool { 43251 // match: (ZeroExt8to64 x) 43252 // cond: 43253 // result: (MOVBQZX x) 43254 for { 43255 x := v.Args[0] 43256 v.reset(OpAMD64MOVBQZX) 43257 v.AddArg(x) 43258 return true 43259 } 43260 } 43261 func rewriteBlockAMD64(b *Block) bool { 43262 config := b.Func.Config 43263 _ = config 43264 fe := b.Func.fe 43265 _ = fe 43266 typ := &config.Types 43267 _ = typ 43268 switch b.Kind { 43269 case BlockAMD64EQ: 43270 // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) 43271 // cond: !config.nacl 43272 // result: (UGE (BTL x y)) 43273 for { 43274 v := b.Control 43275 if v.Op != OpAMD64TESTL { 43276 break 43277 } 43278 _ = v.Args[1] 43279 v_0 := v.Args[0] 43280 if v_0.Op != OpAMD64SHLL { 43281 break 43282 } 43283 _ = v_0.Args[1] 43284 v_0_0 := v_0.Args[0] 43285 if v_0_0.Op != OpAMD64MOVLconst { 43286 break 43287 } 43288 if v_0_0.AuxInt != 1 { 43289 break 43290 } 43291 x := v_0.Args[1] 43292 y := v.Args[1] 43293 if !(!config.nacl) { 43294 break 43295 } 43296 b.Kind = BlockAMD64UGE 43297 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 43298 v0.AddArg(x) 43299 v0.AddArg(y) 43300 b.SetControl(v0) 43301 b.Aux = nil 43302 return true 43303 } 43304 // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) 43305 // cond: !config.nacl 43306 // result: (UGE (BTL x y)) 43307 for { 43308 v := b.Control 43309 if v.Op != OpAMD64TESTL { 43310 break 43311 } 43312 _ = v.Args[1] 43313 y := v.Args[0] 43314 v_1 := v.Args[1] 43315 if v_1.Op != OpAMD64SHLL { 43316 break 43317 } 43318 _ = v_1.Args[1] 43319 v_1_0 := v_1.Args[0] 43320 if v_1_0.Op != OpAMD64MOVLconst { 43321 break 43322 } 43323 if v_1_0.AuxInt != 1 { 43324 break 43325 } 43326 x := v_1.Args[1] 43327 if !(!config.nacl) { 43328 break 43329 } 43330 b.Kind = BlockAMD64UGE 43331 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 43332 v0.AddArg(x) 43333 v0.AddArg(y) 43334 b.SetControl(v0) 43335 b.Aux = nil 43336 return true 43337 } 43338 // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 43339 // cond: !config.nacl 43340 // result: (UGE (BTQ x y)) 43341 for { 43342 v := b.Control 43343 if v.Op != OpAMD64TESTQ { 43344 break 43345 } 43346 _ = v.Args[1] 43347 v_0 := v.Args[0] 43348 if v_0.Op != OpAMD64SHLQ { 43349 break 43350 } 43351 _ = v_0.Args[1] 43352 v_0_0 := v_0.Args[0] 43353 if v_0_0.Op != OpAMD64MOVQconst { 43354 break 43355 } 43356 if v_0_0.AuxInt != 1 { 43357 break 43358 } 43359 x := v_0.Args[1] 43360 y := v.Args[1] 43361 if !(!config.nacl) { 43362 break 43363 } 43364 b.Kind = BlockAMD64UGE 43365 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 43366 v0.AddArg(x) 43367 v0.AddArg(y) 43368 b.SetControl(v0) 43369 b.Aux = nil 43370 return true 43371 } 43372 // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 43373 // cond: !config.nacl 43374 // result: (UGE (BTQ x y)) 43375 for { 43376 v := b.Control 43377 if v.Op != OpAMD64TESTQ { 43378 break 43379 } 43380 _ = v.Args[1] 43381 y := v.Args[0] 43382 v_1 := v.Args[1] 43383 if v_1.Op != OpAMD64SHLQ { 43384 break 43385 } 43386 _ = v_1.Args[1] 43387 v_1_0 := v_1.Args[0] 43388 if v_1_0.Op != OpAMD64MOVQconst { 43389 break 43390 } 43391 if v_1_0.AuxInt != 1 { 43392 break 43393 } 43394 x := v_1.Args[1] 43395 if !(!config.nacl) { 43396 break 43397 } 43398 b.Kind = BlockAMD64UGE 43399 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 43400 v0.AddArg(x) 43401 v0.AddArg(y) 43402 b.SetControl(v0) 43403 b.Aux = nil 43404 return true 43405 } 43406 // match: (EQ (TESTLconst [c] x)) 
43407 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 43408 // result: (UGE (BTLconst [log2(c)] x)) 43409 for { 43410 v := b.Control 43411 if v.Op != OpAMD64TESTLconst { 43412 break 43413 } 43414 c := v.AuxInt 43415 x := v.Args[0] 43416 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 43417 break 43418 } 43419 b.Kind = BlockAMD64UGE 43420 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 43421 v0.AuxInt = log2(c) 43422 v0.AddArg(x) 43423 b.SetControl(v0) 43424 b.Aux = nil 43425 return true 43426 } 43427 // match: (EQ (TESTQconst [c] x)) 43428 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43429 // result: (UGE (BTQconst [log2(c)] x)) 43430 for { 43431 v := b.Control 43432 if v.Op != OpAMD64TESTQconst { 43433 break 43434 } 43435 c := v.AuxInt 43436 x := v.Args[0] 43437 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 43438 break 43439 } 43440 b.Kind = BlockAMD64UGE 43441 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43442 v0.AuxInt = log2(c) 43443 v0.AddArg(x) 43444 b.SetControl(v0) 43445 b.Aux = nil 43446 return true 43447 } 43448 // match: (EQ (TESTQ (MOVQconst [c]) x)) 43449 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43450 // result: (UGE (BTQconst [log2(c)] x)) 43451 for { 43452 v := b.Control 43453 if v.Op != OpAMD64TESTQ { 43454 break 43455 } 43456 _ = v.Args[1] 43457 v_0 := v.Args[0] 43458 if v_0.Op != OpAMD64MOVQconst { 43459 break 43460 } 43461 c := v_0.AuxInt 43462 x := v.Args[1] 43463 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 43464 break 43465 } 43466 b.Kind = BlockAMD64UGE 43467 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43468 v0.AuxInt = log2(c) 43469 v0.AddArg(x) 43470 b.SetControl(v0) 43471 b.Aux = nil 43472 return true 43473 } 43474 // match: (EQ (TESTQ x (MOVQconst [c]))) 43475 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43476 // result: (UGE (BTQconst [log2(c)] x)) 43477 for { 43478 v := b.Control 43479 if v.Op != OpAMD64TESTQ { 43480 break 43481 } 43482 _ = v.Args[1] 43483 x := v.Args[0] 43484 v_1 := v.Args[1] 43485 if v_1.Op != OpAMD64MOVQconst { 43486 break 43487 } 43488 c := v_1.AuxInt 43489 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 43490 break 43491 } 43492 b.Kind = BlockAMD64UGE 43493 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43494 v0.AuxInt = log2(c) 43495 v0.AddArg(x) 43496 b.SetControl(v0) 43497 b.Aux = nil 43498 return true 43499 } 43500 // match: (EQ (InvertFlags cmp) yes no) 43501 // cond: 43502 // result: (EQ cmp yes no) 43503 for { 43504 v := b.Control 43505 if v.Op != OpAMD64InvertFlags { 43506 break 43507 } 43508 cmp := v.Args[0] 43509 b.Kind = BlockAMD64EQ 43510 b.SetControl(cmp) 43511 b.Aux = nil 43512 return true 43513 } 43514 // match: (EQ (FlagEQ) yes no) 43515 // cond: 43516 // result: (First nil yes no) 43517 for { 43518 v := b.Control 43519 if v.Op != OpAMD64FlagEQ { 43520 break 43521 } 43522 b.Kind = BlockFirst 43523 b.SetControl(nil) 43524 b.Aux = nil 43525 return true 43526 } 43527 // match: (EQ (FlagLT_ULT) yes no) 43528 // cond: 43529 // result: (First nil no yes) 43530 for { 43531 v := b.Control 43532 if v.Op != OpAMD64FlagLT_ULT { 43533 break 43534 } 43535 b.Kind = BlockFirst 43536 b.SetControl(nil) 43537 b.Aux = nil 43538 b.swapSuccessors() 43539 return true 43540 } 43541 // match: (EQ (FlagLT_UGT) yes no) 43542 // cond: 43543 // result: (First nil no yes) 43544 for { 43545 v := b.Control 43546 if v.Op != OpAMD64FlagLT_UGT { 43547 break 43548 } 43549 b.Kind = BlockFirst 43550 b.SetControl(nil) 43551 
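// Flag* ops are comparison results known at compile time (FlagLT_UGT reads
// "signed less-than, unsigned greater-than"), so a conditional block seeing
// one is decided statically: it becomes a BlockFirst with nil control, and
// swapSuccessors puts the taken edge first whenever the condition is false.
// This EQ on FlagLT_UGT can never hold, hence the swap just below. In
// outline:
//
//	EQ on FlagEQ   -> (First nil yes no)
//	EQ on FlagLT_* -> (First nil no yes), via swapSuccessors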
b.Aux = nil 43552 b.swapSuccessors() 43553 return true 43554 } 43555 // match: (EQ (FlagGT_ULT) yes no) 43556 // cond: 43557 // result: (First nil no yes) 43558 for { 43559 v := b.Control 43560 if v.Op != OpAMD64FlagGT_ULT { 43561 break 43562 } 43563 b.Kind = BlockFirst 43564 b.SetControl(nil) 43565 b.Aux = nil 43566 b.swapSuccessors() 43567 return true 43568 } 43569 // match: (EQ (FlagGT_UGT) yes no) 43570 // cond: 43571 // result: (First nil no yes) 43572 for { 43573 v := b.Control 43574 if v.Op != OpAMD64FlagGT_UGT { 43575 break 43576 } 43577 b.Kind = BlockFirst 43578 b.SetControl(nil) 43579 b.Aux = nil 43580 b.swapSuccessors() 43581 return true 43582 } 43583 case BlockAMD64GE: 43584 // match: (GE (InvertFlags cmp) yes no) 43585 // cond: 43586 // result: (LE cmp yes no) 43587 for { 43588 v := b.Control 43589 if v.Op != OpAMD64InvertFlags { 43590 break 43591 } 43592 cmp := v.Args[0] 43593 b.Kind = BlockAMD64LE 43594 b.SetControl(cmp) 43595 b.Aux = nil 43596 return true 43597 } 43598 // match: (GE (FlagEQ) yes no) 43599 // cond: 43600 // result: (First nil yes no) 43601 for { 43602 v := b.Control 43603 if v.Op != OpAMD64FlagEQ { 43604 break 43605 } 43606 b.Kind = BlockFirst 43607 b.SetControl(nil) 43608 b.Aux = nil 43609 return true 43610 } 43611 // match: (GE (FlagLT_ULT) yes no) 43612 // cond: 43613 // result: (First nil no yes) 43614 for { 43615 v := b.Control 43616 if v.Op != OpAMD64FlagLT_ULT { 43617 break 43618 } 43619 b.Kind = BlockFirst 43620 b.SetControl(nil) 43621 b.Aux = nil 43622 b.swapSuccessors() 43623 return true 43624 } 43625 // match: (GE (FlagLT_UGT) yes no) 43626 // cond: 43627 // result: (First nil no yes) 43628 for { 43629 v := b.Control 43630 if v.Op != OpAMD64FlagLT_UGT { 43631 break 43632 } 43633 b.Kind = BlockFirst 43634 b.SetControl(nil) 43635 b.Aux = nil 43636 b.swapSuccessors() 43637 return true 43638 } 43639 // match: (GE (FlagGT_ULT) yes no) 43640 // cond: 43641 // result: (First nil yes no) 43642 for { 43643 v := b.Control 43644 if v.Op != OpAMD64FlagGT_ULT { 43645 break 43646 } 43647 b.Kind = BlockFirst 43648 b.SetControl(nil) 43649 b.Aux = nil 43650 return true 43651 } 43652 // match: (GE (FlagGT_UGT) yes no) 43653 // cond: 43654 // result: (First nil yes no) 43655 for { 43656 v := b.Control 43657 if v.Op != OpAMD64FlagGT_UGT { 43658 break 43659 } 43660 b.Kind = BlockFirst 43661 b.SetControl(nil) 43662 b.Aux = nil 43663 return true 43664 } 43665 case BlockAMD64GT: 43666 // match: (GT (InvertFlags cmp) yes no) 43667 // cond: 43668 // result: (LT cmp yes no) 43669 for { 43670 v := b.Control 43671 if v.Op != OpAMD64InvertFlags { 43672 break 43673 } 43674 cmp := v.Args[0] 43675 b.Kind = BlockAMD64LT 43676 b.SetControl(cmp) 43677 b.Aux = nil 43678 return true 43679 } 43680 // match: (GT (FlagEQ) yes no) 43681 // cond: 43682 // result: (First nil no yes) 43683 for { 43684 v := b.Control 43685 if v.Op != OpAMD64FlagEQ { 43686 break 43687 } 43688 b.Kind = BlockFirst 43689 b.SetControl(nil) 43690 b.Aux = nil 43691 b.swapSuccessors() 43692 return true 43693 } 43694 // match: (GT (FlagLT_ULT) yes no) 43695 // cond: 43696 // result: (First nil no yes) 43697 for { 43698 v := b.Control 43699 if v.Op != OpAMD64FlagLT_ULT { 43700 break 43701 } 43702 b.Kind = BlockFirst 43703 b.SetControl(nil) 43704 b.Aux = nil 43705 b.swapSuccessors() 43706 return true 43707 } 43708 // match: (GT (FlagLT_UGT) yes no) 43709 // cond: 43710 // result: (First nil no yes) 43711 for { 43712 v := b.Control 43713 if v.Op != OpAMD64FlagLT_UGT { 43714 break 43715 } 43716 b.Kind = BlockFirst 43717 
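// The (GT (InvertFlags cmp)) rule above, like its EQ/GE/LE/LT counterparts,
// handles comparisons whose operands were swapped when the flags were
// computed: the block kind is replaced by the mirrored predicate, not the
// negated one. The identity being used, sketched in Go:
//
//	x > y  ==  y < x // (GT (InvertFlags cmp)) -> (LT cmp), never (LE cmp)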
b.SetControl(nil) 43718 b.Aux = nil 43719 b.swapSuccessors() 43720 return true 43721 } 43722 // match: (GT (FlagGT_ULT) yes no) 43723 // cond: 43724 // result: (First nil yes no) 43725 for { 43726 v := b.Control 43727 if v.Op != OpAMD64FlagGT_ULT { 43728 break 43729 } 43730 b.Kind = BlockFirst 43731 b.SetControl(nil) 43732 b.Aux = nil 43733 return true 43734 } 43735 // match: (GT (FlagGT_UGT) yes no) 43736 // cond: 43737 // result: (First nil yes no) 43738 for { 43739 v := b.Control 43740 if v.Op != OpAMD64FlagGT_UGT { 43741 break 43742 } 43743 b.Kind = BlockFirst 43744 b.SetControl(nil) 43745 b.Aux = nil 43746 return true 43747 } 43748 case BlockIf: 43749 // match: (If (SETL cmp) yes no) 43750 // cond: 43751 // result: (LT cmp yes no) 43752 for { 43753 v := b.Control 43754 if v.Op != OpAMD64SETL { 43755 break 43756 } 43757 cmp := v.Args[0] 43758 b.Kind = BlockAMD64LT 43759 b.SetControl(cmp) 43760 b.Aux = nil 43761 return true 43762 } 43763 // match: (If (SETLE cmp) yes no) 43764 // cond: 43765 // result: (LE cmp yes no) 43766 for { 43767 v := b.Control 43768 if v.Op != OpAMD64SETLE { 43769 break 43770 } 43771 cmp := v.Args[0] 43772 b.Kind = BlockAMD64LE 43773 b.SetControl(cmp) 43774 b.Aux = nil 43775 return true 43776 } 43777 // match: (If (SETG cmp) yes no) 43778 // cond: 43779 // result: (GT cmp yes no) 43780 for { 43781 v := b.Control 43782 if v.Op != OpAMD64SETG { 43783 break 43784 } 43785 cmp := v.Args[0] 43786 b.Kind = BlockAMD64GT 43787 b.SetControl(cmp) 43788 b.Aux = nil 43789 return true 43790 } 43791 // match: (If (SETGE cmp) yes no) 43792 // cond: 43793 // result: (GE cmp yes no) 43794 for { 43795 v := b.Control 43796 if v.Op != OpAMD64SETGE { 43797 break 43798 } 43799 cmp := v.Args[0] 43800 b.Kind = BlockAMD64GE 43801 b.SetControl(cmp) 43802 b.Aux = nil 43803 return true 43804 } 43805 // match: (If (SETEQ cmp) yes no) 43806 // cond: 43807 // result: (EQ cmp yes no) 43808 for { 43809 v := b.Control 43810 if v.Op != OpAMD64SETEQ { 43811 break 43812 } 43813 cmp := v.Args[0] 43814 b.Kind = BlockAMD64EQ 43815 b.SetControl(cmp) 43816 b.Aux = nil 43817 return true 43818 } 43819 // match: (If (SETNE cmp) yes no) 43820 // cond: 43821 // result: (NE cmp yes no) 43822 for { 43823 v := b.Control 43824 if v.Op != OpAMD64SETNE { 43825 break 43826 } 43827 cmp := v.Args[0] 43828 b.Kind = BlockAMD64NE 43829 b.SetControl(cmp) 43830 b.Aux = nil 43831 return true 43832 } 43833 // match: (If (SETB cmp) yes no) 43834 // cond: 43835 // result: (ULT cmp yes no) 43836 for { 43837 v := b.Control 43838 if v.Op != OpAMD64SETB { 43839 break 43840 } 43841 cmp := v.Args[0] 43842 b.Kind = BlockAMD64ULT 43843 b.SetControl(cmp) 43844 b.Aux = nil 43845 return true 43846 } 43847 // match: (If (SETBE cmp) yes no) 43848 // cond: 43849 // result: (ULE cmp yes no) 43850 for { 43851 v := b.Control 43852 if v.Op != OpAMD64SETBE { 43853 break 43854 } 43855 cmp := v.Args[0] 43856 b.Kind = BlockAMD64ULE 43857 b.SetControl(cmp) 43858 b.Aux = nil 43859 return true 43860 } 43861 // match: (If (SETA cmp) yes no) 43862 // cond: 43863 // result: (UGT cmp yes no) 43864 for { 43865 v := b.Control 43866 if v.Op != OpAMD64SETA { 43867 break 43868 } 43869 cmp := v.Args[0] 43870 b.Kind = BlockAMD64UGT 43871 b.SetControl(cmp) 43872 b.Aux = nil 43873 return true 43874 } 43875 // match: (If (SETAE cmp) yes no) 43876 // cond: 43877 // result: (UGE cmp yes no) 43878 for { 43879 v := b.Control 43880 if v.Op != OpAMD64SETAE { 43881 break 43882 } 43883 cmp := v.Args[0] 43884 b.Kind = BlockAMD64UGE 43885 b.SetControl(cmp) 43886 b.Aux = nil 
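// A generic If block is lowered by looking through its boolean control:
// when the control is a SETcc, the SETcc is dropped and the block branches
// on the underlying flags directly (SETL -> LT, SETB -> ULT, and so on).
// The SETGF/SETGEF/SETEQF/SETNEF rules just below map floating-point
// comparisons to the unsigned and parity-aware block kinds (UGT, UGE, EQF,
// NEF), matching how UCOMISS/UCOMISD report results, including NaNs, in the
// carry and parity flags. Only when nothing matches does the final fallback
// materialize the bool and test it:
//
//	(If cond yes no) -> (NE (TESTB cond cond) yes no)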
43887 return true 43888 } 43889 // match: (If (SETGF cmp) yes no) 43890 // cond: 43891 // result: (UGT cmp yes no) 43892 for { 43893 v := b.Control 43894 if v.Op != OpAMD64SETGF { 43895 break 43896 } 43897 cmp := v.Args[0] 43898 b.Kind = BlockAMD64UGT 43899 b.SetControl(cmp) 43900 b.Aux = nil 43901 return true 43902 } 43903 // match: (If (SETGEF cmp) yes no) 43904 // cond: 43905 // result: (UGE cmp yes no) 43906 for { 43907 v := b.Control 43908 if v.Op != OpAMD64SETGEF { 43909 break 43910 } 43911 cmp := v.Args[0] 43912 b.Kind = BlockAMD64UGE 43913 b.SetControl(cmp) 43914 b.Aux = nil 43915 return true 43916 } 43917 // match: (If (SETEQF cmp) yes no) 43918 // cond: 43919 // result: (EQF cmp yes no) 43920 for { 43921 v := b.Control 43922 if v.Op != OpAMD64SETEQF { 43923 break 43924 } 43925 cmp := v.Args[0] 43926 b.Kind = BlockAMD64EQF 43927 b.SetControl(cmp) 43928 b.Aux = nil 43929 return true 43930 } 43931 // match: (If (SETNEF cmp) yes no) 43932 // cond: 43933 // result: (NEF cmp yes no) 43934 for { 43935 v := b.Control 43936 if v.Op != OpAMD64SETNEF { 43937 break 43938 } 43939 cmp := v.Args[0] 43940 b.Kind = BlockAMD64NEF 43941 b.SetControl(cmp) 43942 b.Aux = nil 43943 return true 43944 } 43945 // match: (If cond yes no) 43946 // cond: 43947 // result: (NE (TESTB cond cond) yes no) 43948 for { 43949 v := b.Control 43950 _ = v 43951 cond := b.Control 43952 b.Kind = BlockAMD64NE 43953 v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags) 43954 v0.AddArg(cond) 43955 v0.AddArg(cond) 43956 b.SetControl(v0) 43957 b.Aux = nil 43958 return true 43959 } 43960 case BlockAMD64LE: 43961 // match: (LE (InvertFlags cmp) yes no) 43962 // cond: 43963 // result: (GE cmp yes no) 43964 for { 43965 v := b.Control 43966 if v.Op != OpAMD64InvertFlags { 43967 break 43968 } 43969 cmp := v.Args[0] 43970 b.Kind = BlockAMD64GE 43971 b.SetControl(cmp) 43972 b.Aux = nil 43973 return true 43974 } 43975 // match: (LE (FlagEQ) yes no) 43976 // cond: 43977 // result: (First nil yes no) 43978 for { 43979 v := b.Control 43980 if v.Op != OpAMD64FlagEQ { 43981 break 43982 } 43983 b.Kind = BlockFirst 43984 b.SetControl(nil) 43985 b.Aux = nil 43986 return true 43987 } 43988 // match: (LE (FlagLT_ULT) yes no) 43989 // cond: 43990 // result: (First nil yes no) 43991 for { 43992 v := b.Control 43993 if v.Op != OpAMD64FlagLT_ULT { 43994 break 43995 } 43996 b.Kind = BlockFirst 43997 b.SetControl(nil) 43998 b.Aux = nil 43999 return true 44000 } 44001 // match: (LE (FlagLT_UGT) yes no) 44002 // cond: 44003 // result: (First nil yes no) 44004 for { 44005 v := b.Control 44006 if v.Op != OpAMD64FlagLT_UGT { 44007 break 44008 } 44009 b.Kind = BlockFirst 44010 b.SetControl(nil) 44011 b.Aux = nil 44012 return true 44013 } 44014 // match: (LE (FlagGT_ULT) yes no) 44015 // cond: 44016 // result: (First nil no yes) 44017 for { 44018 v := b.Control 44019 if v.Op != OpAMD64FlagGT_ULT { 44020 break 44021 } 44022 b.Kind = BlockFirst 44023 b.SetControl(nil) 44024 b.Aux = nil 44025 b.swapSuccessors() 44026 return true 44027 } 44028 // match: (LE (FlagGT_UGT) yes no) 44029 // cond: 44030 // result: (First nil no yes) 44031 for { 44032 v := b.Control 44033 if v.Op != OpAMD64FlagGT_UGT { 44034 break 44035 } 44036 b.Kind = BlockFirst 44037 b.SetControl(nil) 44038 b.Aux = nil 44039 b.swapSuccessors() 44040 return true 44041 } 44042 case BlockAMD64LT: 44043 // match: (LT (InvertFlags cmp) yes no) 44044 // cond: 44045 // result: (GT cmp yes no) 44046 for { 44047 v := b.Control 44048 if v.Op != OpAMD64InvertFlags { 44049 break 44050 } 44051 cmp := 
v.Args[0] 44052 b.Kind = BlockAMD64GT 44053 b.SetControl(cmp) 44054 b.Aux = nil 44055 return true 44056 } 44057 // match: (LT (FlagEQ) yes no) 44058 // cond: 44059 // result: (First nil no yes) 44060 for { 44061 v := b.Control 44062 if v.Op != OpAMD64FlagEQ { 44063 break 44064 } 44065 b.Kind = BlockFirst 44066 b.SetControl(nil) 44067 b.Aux = nil 44068 b.swapSuccessors() 44069 return true 44070 } 44071 // match: (LT (FlagLT_ULT) yes no) 44072 // cond: 44073 // result: (First nil yes no) 44074 for { 44075 v := b.Control 44076 if v.Op != OpAMD64FlagLT_ULT { 44077 break 44078 } 44079 b.Kind = BlockFirst 44080 b.SetControl(nil) 44081 b.Aux = nil 44082 return true 44083 } 44084 // match: (LT (FlagLT_UGT) yes no) 44085 // cond: 44086 // result: (First nil yes no) 44087 for { 44088 v := b.Control 44089 if v.Op != OpAMD64FlagLT_UGT { 44090 break 44091 } 44092 b.Kind = BlockFirst 44093 b.SetControl(nil) 44094 b.Aux = nil 44095 return true 44096 } 44097 // match: (LT (FlagGT_ULT) yes no) 44098 // cond: 44099 // result: (First nil no yes) 44100 for { 44101 v := b.Control 44102 if v.Op != OpAMD64FlagGT_ULT { 44103 break 44104 } 44105 b.Kind = BlockFirst 44106 b.SetControl(nil) 44107 b.Aux = nil 44108 b.swapSuccessors() 44109 return true 44110 } 44111 // match: (LT (FlagGT_UGT) yes no) 44112 // cond: 44113 // result: (First nil no yes) 44114 for { 44115 v := b.Control 44116 if v.Op != OpAMD64FlagGT_UGT { 44117 break 44118 } 44119 b.Kind = BlockFirst 44120 b.SetControl(nil) 44121 b.Aux = nil 44122 b.swapSuccessors() 44123 return true 44124 } 44125 case BlockAMD64NE: 44126 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 44127 // cond: 44128 // result: (LT cmp yes no) 44129 for { 44130 v := b.Control 44131 if v.Op != OpAMD64TESTB { 44132 break 44133 } 44134 _ = v.Args[1] 44135 v_0 := v.Args[0] 44136 if v_0.Op != OpAMD64SETL { 44137 break 44138 } 44139 cmp := v_0.Args[0] 44140 v_1 := v.Args[1] 44141 if v_1.Op != OpAMD64SETL { 44142 break 44143 } 44144 if cmp != v_1.Args[0] { 44145 break 44146 } 44147 b.Kind = BlockAMD64LT 44148 b.SetControl(cmp) 44149 b.Aux = nil 44150 return true 44151 } 44152 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 44153 // cond: 44154 // result: (LT cmp yes no) 44155 for { 44156 v := b.Control 44157 if v.Op != OpAMD64TESTB { 44158 break 44159 } 44160 _ = v.Args[1] 44161 v_0 := v.Args[0] 44162 if v_0.Op != OpAMD64SETL { 44163 break 44164 } 44165 cmp := v_0.Args[0] 44166 v_1 := v.Args[1] 44167 if v_1.Op != OpAMD64SETL { 44168 break 44169 } 44170 if cmp != v_1.Args[0] { 44171 break 44172 } 44173 b.Kind = BlockAMD64LT 44174 b.SetControl(cmp) 44175 b.Aux = nil 44176 return true 44177 } 44178 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 44179 // cond: 44180 // result: (LE cmp yes no) 44181 for { 44182 v := b.Control 44183 if v.Op != OpAMD64TESTB { 44184 break 44185 } 44186 _ = v.Args[1] 44187 v_0 := v.Args[0] 44188 if v_0.Op != OpAMD64SETLE { 44189 break 44190 } 44191 cmp := v_0.Args[0] 44192 v_1 := v.Args[1] 44193 if v_1.Op != OpAMD64SETLE { 44194 break 44195 } 44196 if cmp != v_1.Args[0] { 44197 break 44198 } 44199 b.Kind = BlockAMD64LE 44200 b.SetControl(cmp) 44201 b.Aux = nil 44202 return true 44203 } 44204 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 44205 // cond: 44206 // result: (LE cmp yes no) 44207 for { 44208 v := b.Control 44209 if v.Op != OpAMD64TESTB { 44210 break 44211 } 44212 _ = v.Args[1] 44213 v_0 := v.Args[0] 44214 if v_0.Op != OpAMD64SETLE { 44215 break 44216 } 44217 cmp := v_0.Args[0] 44218 v_1 := v.Args[1] 44219 if v_1.Op != OpAMD64SETLE 
{ 44220 break 44221 } 44222 if cmp != v_1.Args[0] { 44223 break 44224 } 44225 b.Kind = BlockAMD64LE 44226 b.SetControl(cmp) 44227 b.Aux = nil 44228 return true 44229 } 44230 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 44231 // cond: 44232 // result: (GT cmp yes no) 44233 for { 44234 v := b.Control 44235 if v.Op != OpAMD64TESTB { 44236 break 44237 } 44238 _ = v.Args[1] 44239 v_0 := v.Args[0] 44240 if v_0.Op != OpAMD64SETG { 44241 break 44242 } 44243 cmp := v_0.Args[0] 44244 v_1 := v.Args[1] 44245 if v_1.Op != OpAMD64SETG { 44246 break 44247 } 44248 if cmp != v_1.Args[0] { 44249 break 44250 } 44251 b.Kind = BlockAMD64GT 44252 b.SetControl(cmp) 44253 b.Aux = nil 44254 return true 44255 } 44256 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 44257 // cond: 44258 // result: (GT cmp yes no) 44259 for { 44260 v := b.Control 44261 if v.Op != OpAMD64TESTB { 44262 break 44263 } 44264 _ = v.Args[1] 44265 v_0 := v.Args[0] 44266 if v_0.Op != OpAMD64SETG { 44267 break 44268 } 44269 cmp := v_0.Args[0] 44270 v_1 := v.Args[1] 44271 if v_1.Op != OpAMD64SETG { 44272 break 44273 } 44274 if cmp != v_1.Args[0] { 44275 break 44276 } 44277 b.Kind = BlockAMD64GT 44278 b.SetControl(cmp) 44279 b.Aux = nil 44280 return true 44281 } 44282 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 44283 // cond: 44284 // result: (GE cmp yes no) 44285 for { 44286 v := b.Control 44287 if v.Op != OpAMD64TESTB { 44288 break 44289 } 44290 _ = v.Args[1] 44291 v_0 := v.Args[0] 44292 if v_0.Op != OpAMD64SETGE { 44293 break 44294 } 44295 cmp := v_0.Args[0] 44296 v_1 := v.Args[1] 44297 if v_1.Op != OpAMD64SETGE { 44298 break 44299 } 44300 if cmp != v_1.Args[0] { 44301 break 44302 } 44303 b.Kind = BlockAMD64GE 44304 b.SetControl(cmp) 44305 b.Aux = nil 44306 return true 44307 } 44308 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 44309 // cond: 44310 // result: (GE cmp yes no) 44311 for { 44312 v := b.Control 44313 if v.Op != OpAMD64TESTB { 44314 break 44315 } 44316 _ = v.Args[1] 44317 v_0 := v.Args[0] 44318 if v_0.Op != OpAMD64SETGE { 44319 break 44320 } 44321 cmp := v_0.Args[0] 44322 v_1 := v.Args[1] 44323 if v_1.Op != OpAMD64SETGE { 44324 break 44325 } 44326 if cmp != v_1.Args[0] { 44327 break 44328 } 44329 b.Kind = BlockAMD64GE 44330 b.SetControl(cmp) 44331 b.Aux = nil 44332 return true 44333 } 44334 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 44335 // cond: 44336 // result: (EQ cmp yes no) 44337 for { 44338 v := b.Control 44339 if v.Op != OpAMD64TESTB { 44340 break 44341 } 44342 _ = v.Args[1] 44343 v_0 := v.Args[0] 44344 if v_0.Op != OpAMD64SETEQ { 44345 break 44346 } 44347 cmp := v_0.Args[0] 44348 v_1 := v.Args[1] 44349 if v_1.Op != OpAMD64SETEQ { 44350 break 44351 } 44352 if cmp != v_1.Args[0] { 44353 break 44354 } 44355 b.Kind = BlockAMD64EQ 44356 b.SetControl(cmp) 44357 b.Aux = nil 44358 return true 44359 } 44360 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 44361 // cond: 44362 // result: (EQ cmp yes no) 44363 for { 44364 v := b.Control 44365 if v.Op != OpAMD64TESTB { 44366 break 44367 } 44368 _ = v.Args[1] 44369 v_0 := v.Args[0] 44370 if v_0.Op != OpAMD64SETEQ { 44371 break 44372 } 44373 cmp := v_0.Args[0] 44374 v_1 := v.Args[1] 44375 if v_1.Op != OpAMD64SETEQ { 44376 break 44377 } 44378 if cmp != v_1.Args[0] { 44379 break 44380 } 44381 b.Kind = BlockAMD64EQ 44382 b.SetControl(cmp) 44383 b.Aux = nil 44384 return true 44385 } 44386 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 44387 // cond: 44388 // result: (NE cmp yes no) 44389 for { 44390 v := b.Control 44391 if v.Op != 
OpAMD64TESTB { 44392 break 44393 } 44394 _ = v.Args[1] 44395 v_0 := v.Args[0] 44396 if v_0.Op != OpAMD64SETNE { 44397 break 44398 } 44399 cmp := v_0.Args[0] 44400 v_1 := v.Args[1] 44401 if v_1.Op != OpAMD64SETNE { 44402 break 44403 } 44404 if cmp != v_1.Args[0] { 44405 break 44406 } 44407 b.Kind = BlockAMD64NE 44408 b.SetControl(cmp) 44409 b.Aux = nil 44410 return true 44411 } 44412 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 44413 // cond: 44414 // result: (NE cmp yes no) 44415 for { 44416 v := b.Control 44417 if v.Op != OpAMD64TESTB { 44418 break 44419 } 44420 _ = v.Args[1] 44421 v_0 := v.Args[0] 44422 if v_0.Op != OpAMD64SETNE { 44423 break 44424 } 44425 cmp := v_0.Args[0] 44426 v_1 := v.Args[1] 44427 if v_1.Op != OpAMD64SETNE { 44428 break 44429 } 44430 if cmp != v_1.Args[0] { 44431 break 44432 } 44433 b.Kind = BlockAMD64NE 44434 b.SetControl(cmp) 44435 b.Aux = nil 44436 return true 44437 } 44438 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 44439 // cond: 44440 // result: (ULT cmp yes no) 44441 for { 44442 v := b.Control 44443 if v.Op != OpAMD64TESTB { 44444 break 44445 } 44446 _ = v.Args[1] 44447 v_0 := v.Args[0] 44448 if v_0.Op != OpAMD64SETB { 44449 break 44450 } 44451 cmp := v_0.Args[0] 44452 v_1 := v.Args[1] 44453 if v_1.Op != OpAMD64SETB { 44454 break 44455 } 44456 if cmp != v_1.Args[0] { 44457 break 44458 } 44459 b.Kind = BlockAMD64ULT 44460 b.SetControl(cmp) 44461 b.Aux = nil 44462 return true 44463 } 44464 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 44465 // cond: 44466 // result: (ULT cmp yes no) 44467 for { 44468 v := b.Control 44469 if v.Op != OpAMD64TESTB { 44470 break 44471 } 44472 _ = v.Args[1] 44473 v_0 := v.Args[0] 44474 if v_0.Op != OpAMD64SETB { 44475 break 44476 } 44477 cmp := v_0.Args[0] 44478 v_1 := v.Args[1] 44479 if v_1.Op != OpAMD64SETB { 44480 break 44481 } 44482 if cmp != v_1.Args[0] { 44483 break 44484 } 44485 b.Kind = BlockAMD64ULT 44486 b.SetControl(cmp) 44487 b.Aux = nil 44488 return true 44489 } 44490 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 44491 // cond: 44492 // result: (ULE cmp yes no) 44493 for { 44494 v := b.Control 44495 if v.Op != OpAMD64TESTB { 44496 break 44497 } 44498 _ = v.Args[1] 44499 v_0 := v.Args[0] 44500 if v_0.Op != OpAMD64SETBE { 44501 break 44502 } 44503 cmp := v_0.Args[0] 44504 v_1 := v.Args[1] 44505 if v_1.Op != OpAMD64SETBE { 44506 break 44507 } 44508 if cmp != v_1.Args[0] { 44509 break 44510 } 44511 b.Kind = BlockAMD64ULE 44512 b.SetControl(cmp) 44513 b.Aux = nil 44514 return true 44515 } 44516 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 44517 // cond: 44518 // result: (ULE cmp yes no) 44519 for { 44520 v := b.Control 44521 if v.Op != OpAMD64TESTB { 44522 break 44523 } 44524 _ = v.Args[1] 44525 v_0 := v.Args[0] 44526 if v_0.Op != OpAMD64SETBE { 44527 break 44528 } 44529 cmp := v_0.Args[0] 44530 v_1 := v.Args[1] 44531 if v_1.Op != OpAMD64SETBE { 44532 break 44533 } 44534 if cmp != v_1.Args[0] { 44535 break 44536 } 44537 b.Kind = BlockAMD64ULE 44538 b.SetControl(cmp) 44539 b.Aux = nil 44540 return true 44541 } 44542 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 44543 // cond: 44544 // result: (UGT cmp yes no) 44545 for { 44546 v := b.Control 44547 if v.Op != OpAMD64TESTB { 44548 break 44549 } 44550 _ = v.Args[1] 44551 v_0 := v.Args[0] 44552 if v_0.Op != OpAMD64SETA { 44553 break 44554 } 44555 cmp := v_0.Args[0] 44556 v_1 := v.Args[1] 44557 if v_1.Op != OpAMD64SETA { 44558 break 44559 } 44560 if cmp != v_1.Args[0] { 44561 break 44562 } 44563 b.Kind = BlockAMD64UGT 44564 
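// These NE (TESTB (SETcc cmp) (SETcc cmp)) rules undo the If fallback
// noted earlier: when the tested value is itself a materialized SETcc, the
// TESTB and both SETcc ops are discarded and the block branches on the
// original flags. Each variant appears twice because the rule generator
// emits one copy per argument order of the commutative TESTB, and for this
// symmetric pattern the two copies come out identical. Source-level shape:
//
//	ok := x < y   // SETL
//	if ok { ... } // (NE (TESTB (SETL cmp) (SETL cmp))) -> (LT cmp)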
b.SetControl(cmp) 44565 b.Aux = nil 44566 return true 44567 } 44568 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 44569 // cond: 44570 // result: (UGT cmp yes no) 44571 for { 44572 v := b.Control 44573 if v.Op != OpAMD64TESTB { 44574 break 44575 } 44576 _ = v.Args[1] 44577 v_0 := v.Args[0] 44578 if v_0.Op != OpAMD64SETA { 44579 break 44580 } 44581 cmp := v_0.Args[0] 44582 v_1 := v.Args[1] 44583 if v_1.Op != OpAMD64SETA { 44584 break 44585 } 44586 if cmp != v_1.Args[0] { 44587 break 44588 } 44589 b.Kind = BlockAMD64UGT 44590 b.SetControl(cmp) 44591 b.Aux = nil 44592 return true 44593 } 44594 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 44595 // cond: 44596 // result: (UGE cmp yes no) 44597 for { 44598 v := b.Control 44599 if v.Op != OpAMD64TESTB { 44600 break 44601 } 44602 _ = v.Args[1] 44603 v_0 := v.Args[0] 44604 if v_0.Op != OpAMD64SETAE { 44605 break 44606 } 44607 cmp := v_0.Args[0] 44608 v_1 := v.Args[1] 44609 if v_1.Op != OpAMD64SETAE { 44610 break 44611 } 44612 if cmp != v_1.Args[0] { 44613 break 44614 } 44615 b.Kind = BlockAMD64UGE 44616 b.SetControl(cmp) 44617 b.Aux = nil 44618 return true 44619 } 44620 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 44621 // cond: 44622 // result: (UGE cmp yes no) 44623 for { 44624 v := b.Control 44625 if v.Op != OpAMD64TESTB { 44626 break 44627 } 44628 _ = v.Args[1] 44629 v_0 := v.Args[0] 44630 if v_0.Op != OpAMD64SETAE { 44631 break 44632 } 44633 cmp := v_0.Args[0] 44634 v_1 := v.Args[1] 44635 if v_1.Op != OpAMD64SETAE { 44636 break 44637 } 44638 if cmp != v_1.Args[0] { 44639 break 44640 } 44641 b.Kind = BlockAMD64UGE 44642 b.SetControl(cmp) 44643 b.Aux = nil 44644 return true 44645 } 44646 // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) 44647 // cond: !config.nacl 44648 // result: (ULT (BTL x y)) 44649 for { 44650 v := b.Control 44651 if v.Op != OpAMD64TESTL { 44652 break 44653 } 44654 _ = v.Args[1] 44655 v_0 := v.Args[0] 44656 if v_0.Op != OpAMD64SHLL { 44657 break 44658 } 44659 _ = v_0.Args[1] 44660 v_0_0 := v_0.Args[0] 44661 if v_0_0.Op != OpAMD64MOVLconst { 44662 break 44663 } 44664 if v_0_0.AuxInt != 1 { 44665 break 44666 } 44667 x := v_0.Args[1] 44668 y := v.Args[1] 44669 if !(!config.nacl) { 44670 break 44671 } 44672 b.Kind = BlockAMD64ULT 44673 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 44674 v0.AddArg(x) 44675 v0.AddArg(y) 44676 b.SetControl(v0) 44677 b.Aux = nil 44678 return true 44679 } 44680 // match: (NE (TESTL y (SHLL (MOVLconst [1]) x))) 44681 // cond: !config.nacl 44682 // result: (ULT (BTL x y)) 44683 for { 44684 v := b.Control 44685 if v.Op != OpAMD64TESTL { 44686 break 44687 } 44688 _ = v.Args[1] 44689 y := v.Args[0] 44690 v_1 := v.Args[1] 44691 if v_1.Op != OpAMD64SHLL { 44692 break 44693 } 44694 _ = v_1.Args[1] 44695 v_1_0 := v_1.Args[0] 44696 if v_1_0.Op != OpAMD64MOVLconst { 44697 break 44698 } 44699 if v_1_0.AuxInt != 1 { 44700 break 44701 } 44702 x := v_1.Args[1] 44703 if !(!config.nacl) { 44704 break 44705 } 44706 b.Kind = BlockAMD64ULT 44707 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 44708 v0.AddArg(x) 44709 v0.AddArg(y) 44710 b.SetControl(v0) 44711 b.Aux = nil 44712 return true 44713 } 44714 // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 44715 // cond: !config.nacl 44716 // result: (ULT (BTQ x y)) 44717 for { 44718 v := b.Control 44719 if v.Op != OpAMD64TESTQ { 44720 break 44721 } 44722 _ = v.Args[1] 44723 v_0 := v.Args[0] 44724 if v_0.Op != OpAMD64SHLQ { 44725 break 44726 } 44727 _ = v_0.Args[1] 44728 v_0_0 := v_0.Args[0] 44729 if v_0_0.Op != OpAMD64MOVQconst { 44730 
break 44731 } 44732 if v_0_0.AuxInt != 1 { 44733 break 44734 } 44735 x := v_0.Args[1] 44736 y := v.Args[1] 44737 if !(!config.nacl) { 44738 break 44739 } 44740 b.Kind = BlockAMD64ULT 44741 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 44742 v0.AddArg(x) 44743 v0.AddArg(y) 44744 b.SetControl(v0) 44745 b.Aux = nil 44746 return true 44747 } 44748 // match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) 44749 // cond: !config.nacl 44750 // result: (ULT (BTQ x y)) 44751 for { 44752 v := b.Control 44753 if v.Op != OpAMD64TESTQ { 44754 break 44755 } 44756 _ = v.Args[1] 44757 y := v.Args[0] 44758 v_1 := v.Args[1] 44759 if v_1.Op != OpAMD64SHLQ { 44760 break 44761 } 44762 _ = v_1.Args[1] 44763 v_1_0 := v_1.Args[0] 44764 if v_1_0.Op != OpAMD64MOVQconst { 44765 break 44766 } 44767 if v_1_0.AuxInt != 1 { 44768 break 44769 } 44770 x := v_1.Args[1] 44771 if !(!config.nacl) { 44772 break 44773 } 44774 b.Kind = BlockAMD64ULT 44775 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 44776 v0.AddArg(x) 44777 v0.AddArg(y) 44778 b.SetControl(v0) 44779 b.Aux = nil 44780 return true 44781 } 44782 // match: (NE (TESTLconst [c] x)) 44783 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 44784 // result: (ULT (BTLconst [log2(c)] x)) 44785 for { 44786 v := b.Control 44787 if v.Op != OpAMD64TESTLconst { 44788 break 44789 } 44790 c := v.AuxInt 44791 x := v.Args[0] 44792 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 44793 break 44794 } 44795 b.Kind = BlockAMD64ULT 44796 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 44797 v0.AuxInt = log2(c) 44798 v0.AddArg(x) 44799 b.SetControl(v0) 44800 b.Aux = nil 44801 return true 44802 } 44803 // match: (NE (TESTQconst [c] x)) 44804 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44805 // result: (ULT (BTQconst [log2(c)] x)) 44806 for { 44807 v := b.Control 44808 if v.Op != OpAMD64TESTQconst { 44809 break 44810 } 44811 c := v.AuxInt 44812 x := v.Args[0] 44813 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44814 break 44815 } 44816 b.Kind = BlockAMD64ULT 44817 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44818 v0.AuxInt = log2(c) 44819 v0.AddArg(x) 44820 b.SetControl(v0) 44821 b.Aux = nil 44822 return true 44823 } 44824 // match: (NE (TESTQ (MOVQconst [c]) x)) 44825 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44826 // result: (ULT (BTQconst [log2(c)] x)) 44827 for { 44828 v := b.Control 44829 if v.Op != OpAMD64TESTQ { 44830 break 44831 } 44832 _ = v.Args[1] 44833 v_0 := v.Args[0] 44834 if v_0.Op != OpAMD64MOVQconst { 44835 break 44836 } 44837 c := v_0.AuxInt 44838 x := v.Args[1] 44839 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44840 break 44841 } 44842 b.Kind = BlockAMD64ULT 44843 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44844 v0.AuxInt = log2(c) 44845 v0.AddArg(x) 44846 b.SetControl(v0) 44847 b.Aux = nil 44848 return true 44849 } 44850 // match: (NE (TESTQ x (MOVQconst [c]))) 44851 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44852 // result: (ULT (BTQconst [log2(c)] x)) 44853 for { 44854 v := b.Control 44855 if v.Op != OpAMD64TESTQ { 44856 break 44857 } 44858 _ = v.Args[1] 44859 x := v.Args[0] 44860 v_1 := v.Args[1] 44861 if v_1.Op != OpAMD64MOVQconst { 44862 break 44863 } 44864 c := v_1.AuxInt 44865 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44866 break 44867 } 44868 b.Kind = BlockAMD64ULT 44869 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44870 v0.AuxInt = log2(c) 44871 v0.AddArg(x) 44872 b.SetControl(v0) 44873 b.Aux = nil 44874 
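// Bit tests against constants follow the same scheme as the BTQ rules
// above: a TEST against a power-of-two constant c becomes a test of bit
// log2(c), and NE branches on the carry flag that BT sets (ULT when the
// bit is 1, UGE in the EQ case). Source-level shape:
//
//	if x&0x100 != 0 { ... } // (NE (TESTQ x (MOVQconst [256]))) -> (ULT (BTQconst [8] x))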
return true 44875 } 44876 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 44877 // cond: 44878 // result: (UGT cmp yes no) 44879 for { 44880 v := b.Control 44881 if v.Op != OpAMD64TESTB { 44882 break 44883 } 44884 _ = v.Args[1] 44885 v_0 := v.Args[0] 44886 if v_0.Op != OpAMD64SETGF { 44887 break 44888 } 44889 cmp := v_0.Args[0] 44890 v_1 := v.Args[1] 44891 if v_1.Op != OpAMD64SETGF { 44892 break 44893 } 44894 if cmp != v_1.Args[0] { 44895 break 44896 } 44897 b.Kind = BlockAMD64UGT 44898 b.SetControl(cmp) 44899 b.Aux = nil 44900 return true 44901 } 44902 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 44903 // cond: 44904 // result: (UGT cmp yes no) 44905 for { 44906 v := b.Control 44907 if v.Op != OpAMD64TESTB { 44908 break 44909 } 44910 _ = v.Args[1] 44911 v_0 := v.Args[0] 44912 if v_0.Op != OpAMD64SETGF { 44913 break 44914 } 44915 cmp := v_0.Args[0] 44916 v_1 := v.Args[1] 44917 if v_1.Op != OpAMD64SETGF { 44918 break 44919 } 44920 if cmp != v_1.Args[0] { 44921 break 44922 } 44923 b.Kind = BlockAMD64UGT 44924 b.SetControl(cmp) 44925 b.Aux = nil 44926 return true 44927 } 44928 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 44929 // cond: 44930 // result: (UGE cmp yes no) 44931 for { 44932 v := b.Control 44933 if v.Op != OpAMD64TESTB { 44934 break 44935 } 44936 _ = v.Args[1] 44937 v_0 := v.Args[0] 44938 if v_0.Op != OpAMD64SETGEF { 44939 break 44940 } 44941 cmp := v_0.Args[0] 44942 v_1 := v.Args[1] 44943 if v_1.Op != OpAMD64SETGEF { 44944 break 44945 } 44946 if cmp != v_1.Args[0] { 44947 break 44948 } 44949 b.Kind = BlockAMD64UGE 44950 b.SetControl(cmp) 44951 b.Aux = nil 44952 return true 44953 } 44954 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 44955 // cond: 44956 // result: (UGE cmp yes no) 44957 for { 44958 v := b.Control 44959 if v.Op != OpAMD64TESTB { 44960 break 44961 } 44962 _ = v.Args[1] 44963 v_0 := v.Args[0] 44964 if v_0.Op != OpAMD64SETGEF { 44965 break 44966 } 44967 cmp := v_0.Args[0] 44968 v_1 := v.Args[1] 44969 if v_1.Op != OpAMD64SETGEF { 44970 break 44971 } 44972 if cmp != v_1.Args[0] { 44973 break 44974 } 44975 b.Kind = BlockAMD64UGE 44976 b.SetControl(cmp) 44977 b.Aux = nil 44978 return true 44979 } 44980 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 44981 // cond: 44982 // result: (EQF cmp yes no) 44983 for { 44984 v := b.Control 44985 if v.Op != OpAMD64TESTB { 44986 break 44987 } 44988 _ = v.Args[1] 44989 v_0 := v.Args[0] 44990 if v_0.Op != OpAMD64SETEQF { 44991 break 44992 } 44993 cmp := v_0.Args[0] 44994 v_1 := v.Args[1] 44995 if v_1.Op != OpAMD64SETEQF { 44996 break 44997 } 44998 if cmp != v_1.Args[0] { 44999 break 45000 } 45001 b.Kind = BlockAMD64EQF 45002 b.SetControl(cmp) 45003 b.Aux = nil 45004 return true 45005 } 45006 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 45007 // cond: 45008 // result: (EQF cmp yes no) 45009 for { 45010 v := b.Control 45011 if v.Op != OpAMD64TESTB { 45012 break 45013 } 45014 _ = v.Args[1] 45015 v_0 := v.Args[0] 45016 if v_0.Op != OpAMD64SETEQF { 45017 break 45018 } 45019 cmp := v_0.Args[0] 45020 v_1 := v.Args[1] 45021 if v_1.Op != OpAMD64SETEQF { 45022 break 45023 } 45024 if cmp != v_1.Args[0] { 45025 break 45026 } 45027 b.Kind = BlockAMD64EQF 45028 b.SetControl(cmp) 45029 b.Aux = nil 45030 return true 45031 } 45032 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 45033 // cond: 45034 // result: (NEF cmp yes no) 45035 for { 45036 v := b.Control 45037 if v.Op != OpAMD64TESTB { 45038 break 45039 } 45040 _ = v.Args[1] 45041 v_0 := v.Args[0] 45042 if v_0.Op != OpAMD64SETNEF { 
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
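	// For the unsigned conditional blocks below, (InvertFlags cmp) flips
	// the condition to its converse (UGE<->ULE, UGT<->ULT), and constant
	// Flag* controls resolve the branch statically, exactly as in the NE
	// rules above.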
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	}
	return false
}