github.com/zebozhuang/go@v0.0.0-20200207033046-f8a98f6f5c5d/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
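// The per-op functions below are machine-generated from the declarative
// rewrite rules in gen/AMD64.rules; the // match, // cond, and // result
// comments above each matching loop reproduce the originating rule. For
// example (paraphrased from those comments; see gen/AMD64.rules for the
// authoritative text), the first rule handled by
// rewriteValueAMD64_OpAMD64ADDL_0 has the shape
//
//	(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
//
// i.e. an add of a constant operand is strength-reduced to the immediate
// form. Each generated matcher checks the operand shape, evaluates the
// optional side condition, and on success rewrites v in place via v.reset
// and returns true; on failure it breaks out and tries the next rule.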
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
:= l.Args[1] 2264 if !(canMergeLoad(v, l, x) && clobber(l)) { 2265 break 2266 } 2267 v.reset(OpAMD64ANDQmem) 2268 v.AuxInt = off 2269 v.Aux = sym 2270 v.AddArg(x) 2271 v.AddArg(ptr) 2272 v.AddArg(mem) 2273 return true 2274 } 2275 // match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x) 2276 // cond: canMergeLoad(v, l, x) && clobber(l) 2277 // result: (ANDQmem x [off] {sym} ptr mem) 2278 for { 2279 _ = v.Args[1] 2280 l := v.Args[0] 2281 if l.Op != OpAMD64MOVQload { 2282 break 2283 } 2284 off := l.AuxInt 2285 sym := l.Aux 2286 _ = l.Args[1] 2287 ptr := l.Args[0] 2288 mem := l.Args[1] 2289 x := v.Args[1] 2290 if !(canMergeLoad(v, l, x) && clobber(l)) { 2291 break 2292 } 2293 v.reset(OpAMD64ANDQmem) 2294 v.AuxInt = off 2295 v.Aux = sym 2296 v.AddArg(x) 2297 v.AddArg(ptr) 2298 v.AddArg(mem) 2299 return true 2300 } 2301 return false 2302 } 2303 func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { 2304 // match: (ANDQconst [c] (ANDQconst [d] x)) 2305 // cond: 2306 // result: (ANDQconst [c & d] x) 2307 for { 2308 c := v.AuxInt 2309 v_0 := v.Args[0] 2310 if v_0.Op != OpAMD64ANDQconst { 2311 break 2312 } 2313 d := v_0.AuxInt 2314 x := v_0.Args[0] 2315 v.reset(OpAMD64ANDQconst) 2316 v.AuxInt = c & d 2317 v.AddArg(x) 2318 return true 2319 } 2320 // match: (ANDQconst [0xFF] x) 2321 // cond: 2322 // result: (MOVBQZX x) 2323 for { 2324 if v.AuxInt != 0xFF { 2325 break 2326 } 2327 x := v.Args[0] 2328 v.reset(OpAMD64MOVBQZX) 2329 v.AddArg(x) 2330 return true 2331 } 2332 // match: (ANDQconst [0xFFFF] x) 2333 // cond: 2334 // result: (MOVWQZX x) 2335 for { 2336 if v.AuxInt != 0xFFFF { 2337 break 2338 } 2339 x := v.Args[0] 2340 v.reset(OpAMD64MOVWQZX) 2341 v.AddArg(x) 2342 return true 2343 } 2344 // match: (ANDQconst [0xFFFFFFFF] x) 2345 // cond: 2346 // result: (MOVLQZX x) 2347 for { 2348 if v.AuxInt != 0xFFFFFFFF { 2349 break 2350 } 2351 x := v.Args[0] 2352 v.reset(OpAMD64MOVLQZX) 2353 v.AddArg(x) 2354 return true 2355 } 2356 // match: (ANDQconst [0] _) 2357 // cond: 2358 // result: (MOVQconst [0]) 2359 for { 2360 if v.AuxInt != 0 { 2361 break 2362 } 2363 v.reset(OpAMD64MOVQconst) 2364 v.AuxInt = 0 2365 return true 2366 } 2367 // match: (ANDQconst [-1] x) 2368 // cond: 2369 // result: x 2370 for { 2371 if v.AuxInt != -1 { 2372 break 2373 } 2374 x := v.Args[0] 2375 v.reset(OpCopy) 2376 v.Type = x.Type 2377 v.AddArg(x) 2378 return true 2379 } 2380 // match: (ANDQconst [c] (MOVQconst [d])) 2381 // cond: 2382 // result: (MOVQconst [c&d]) 2383 for { 2384 c := v.AuxInt 2385 v_0 := v.Args[0] 2386 if v_0.Op != OpAMD64MOVQconst { 2387 break 2388 } 2389 d := v_0.AuxInt 2390 v.reset(OpAMD64MOVQconst) 2391 v.AuxInt = c & d 2392 return true 2393 } 2394 return false 2395 } 2396 func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool { 2397 b := v.Block 2398 _ = b 2399 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) 2400 // cond: 2401 // result: (BSFQ (ORQconst <t> [1<<8] x)) 2402 for { 2403 v_0 := v.Args[0] 2404 if v_0.Op != OpAMD64ORQconst { 2405 break 2406 } 2407 t := v_0.Type 2408 if v_0.AuxInt != 1<<8 { 2409 break 2410 } 2411 v_0_0 := v_0.Args[0] 2412 if v_0_0.Op != OpAMD64MOVBQZX { 2413 break 2414 } 2415 x := v_0_0.Args[0] 2416 v.reset(OpAMD64BSFQ) 2417 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) 2418 v0.AuxInt = 1 << 8 2419 v0.AddArg(x) 2420 v.AddArg(v0) 2421 return true 2422 } 2423 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) 2424 // cond: 2425 // result: (BSFQ (ORQconst <t> [1<<16] x)) 2426 for { 2427 v_0 := v.Args[0] 2428 if v_0.Op != OpAMD64ORQconst { 2429 break 2430 } 2431 t := v_0.Type 2432 if 
v_0.AuxInt != 1<<16 { 2433 break 2434 } 2435 v_0_0 := v_0.Args[0] 2436 if v_0_0.Op != OpAMD64MOVWQZX { 2437 break 2438 } 2439 x := v_0_0.Args[0] 2440 v.reset(OpAMD64BSFQ) 2441 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) 2442 v0.AuxInt = 1 << 16 2443 v0.AddArg(x) 2444 v.AddArg(v0) 2445 return true 2446 } 2447 return false 2448 } 2449 func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool { 2450 // match: (BTQconst [c] x) 2451 // cond: c < 32 2452 // result: (BTLconst [c] x) 2453 for { 2454 c := v.AuxInt 2455 x := v.Args[0] 2456 if !(c < 32) { 2457 break 2458 } 2459 v.reset(OpAMD64BTLconst) 2460 v.AuxInt = c 2461 v.AddArg(x) 2462 return true 2463 } 2464 return false 2465 } 2466 func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool { 2467 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) 2468 // cond: c != 0 2469 // result: x 2470 for { 2471 _ = v.Args[2] 2472 x := v.Args[0] 2473 v_2 := v.Args[2] 2474 if v_2.Op != OpSelect1 { 2475 break 2476 } 2477 v_2_0 := v_2.Args[0] 2478 if v_2_0.Op != OpAMD64BSFQ { 2479 break 2480 } 2481 v_2_0_0 := v_2_0.Args[0] 2482 if v_2_0_0.Op != OpAMD64ORQconst { 2483 break 2484 } 2485 c := v_2_0_0.AuxInt 2486 if !(c != 0) { 2487 break 2488 } 2489 v.reset(OpCopy) 2490 v.Type = x.Type 2491 v.AddArg(x) 2492 return true 2493 } 2494 return false 2495 } 2496 func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { 2497 b := v.Block 2498 _ = b 2499 // match: (CMPB x (MOVLconst [c])) 2500 // cond: 2501 // result: (CMPBconst x [int64(int8(c))]) 2502 for { 2503 _ = v.Args[1] 2504 x := v.Args[0] 2505 v_1 := v.Args[1] 2506 if v_1.Op != OpAMD64MOVLconst { 2507 break 2508 } 2509 c := v_1.AuxInt 2510 v.reset(OpAMD64CMPBconst) 2511 v.AuxInt = int64(int8(c)) 2512 v.AddArg(x) 2513 return true 2514 } 2515 // match: (CMPB (MOVLconst [c]) x) 2516 // cond: 2517 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 2518 for { 2519 _ = v.Args[1] 2520 v_0 := v.Args[0] 2521 if v_0.Op != OpAMD64MOVLconst { 2522 break 2523 } 2524 c := v_0.AuxInt 2525 x := v.Args[1] 2526 v.reset(OpAMD64InvertFlags) 2527 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 2528 v0.AuxInt = int64(int8(c)) 2529 v0.AddArg(x) 2530 v.AddArg(v0) 2531 return true 2532 } 2533 return false 2534 } 2535 func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool { 2536 // match: (CMPBconst (MOVLconst [x]) [y]) 2537 // cond: int8(x)==int8(y) 2538 // result: (FlagEQ) 2539 for { 2540 y := v.AuxInt 2541 v_0 := v.Args[0] 2542 if v_0.Op != OpAMD64MOVLconst { 2543 break 2544 } 2545 x := v_0.AuxInt 2546 if !(int8(x) == int8(y)) { 2547 break 2548 } 2549 v.reset(OpAMD64FlagEQ) 2550 return true 2551 } 2552 // match: (CMPBconst (MOVLconst [x]) [y]) 2553 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 2554 // result: (FlagLT_ULT) 2555 for { 2556 y := v.AuxInt 2557 v_0 := v.Args[0] 2558 if v_0.Op != OpAMD64MOVLconst { 2559 break 2560 } 2561 x := v_0.AuxInt 2562 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 2563 break 2564 } 2565 v.reset(OpAMD64FlagLT_ULT) 2566 return true 2567 } 2568 // match: (CMPBconst (MOVLconst [x]) [y]) 2569 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 2570 // result: (FlagLT_UGT) 2571 for { 2572 y := v.AuxInt 2573 v_0 := v.Args[0] 2574 if v_0.Op != OpAMD64MOVLconst { 2575 break 2576 } 2577 x := v_0.AuxInt 2578 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 2579 break 2580 } 2581 v.reset(OpAMD64FlagLT_UGT) 2582 return true 2583 } 2584 // match: (CMPBconst (MOVLconst [x]) [y]) 2585 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 2586 // result: (FlagGT_ULT) 2587 for { 2588 y := v.AuxInt 2589 v_0 := 
v.Args[0] 2590 if v_0.Op != OpAMD64MOVLconst { 2591 break 2592 } 2593 x := v_0.AuxInt 2594 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 2595 break 2596 } 2597 v.reset(OpAMD64FlagGT_ULT) 2598 return true 2599 } 2600 // match: (CMPBconst (MOVLconst [x]) [y]) 2601 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 2602 // result: (FlagGT_UGT) 2603 for { 2604 y := v.AuxInt 2605 v_0 := v.Args[0] 2606 if v_0.Op != OpAMD64MOVLconst { 2607 break 2608 } 2609 x := v_0.AuxInt 2610 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 2611 break 2612 } 2613 v.reset(OpAMD64FlagGT_UGT) 2614 return true 2615 } 2616 // match: (CMPBconst (ANDLconst _ [m]) [n]) 2617 // cond: 0 <= int8(m) && int8(m) < int8(n) 2618 // result: (FlagLT_ULT) 2619 for { 2620 n := v.AuxInt 2621 v_0 := v.Args[0] 2622 if v_0.Op != OpAMD64ANDLconst { 2623 break 2624 } 2625 m := v_0.AuxInt 2626 if !(0 <= int8(m) && int8(m) < int8(n)) { 2627 break 2628 } 2629 v.reset(OpAMD64FlagLT_ULT) 2630 return true 2631 } 2632 // match: (CMPBconst (ANDL x y) [0]) 2633 // cond: 2634 // result: (TESTB x y) 2635 for { 2636 if v.AuxInt != 0 { 2637 break 2638 } 2639 v_0 := v.Args[0] 2640 if v_0.Op != OpAMD64ANDL { 2641 break 2642 } 2643 _ = v_0.Args[1] 2644 x := v_0.Args[0] 2645 y := v_0.Args[1] 2646 v.reset(OpAMD64TESTB) 2647 v.AddArg(x) 2648 v.AddArg(y) 2649 return true 2650 } 2651 // match: (CMPBconst (ANDLconst [c] x) [0]) 2652 // cond: 2653 // result: (TESTBconst [int64(int8(c))] x) 2654 for { 2655 if v.AuxInt != 0 { 2656 break 2657 } 2658 v_0 := v.Args[0] 2659 if v_0.Op != OpAMD64ANDLconst { 2660 break 2661 } 2662 c := v_0.AuxInt 2663 x := v_0.Args[0] 2664 v.reset(OpAMD64TESTBconst) 2665 v.AuxInt = int64(int8(c)) 2666 v.AddArg(x) 2667 return true 2668 } 2669 // match: (CMPBconst x [0]) 2670 // cond: 2671 // result: (TESTB x x) 2672 for { 2673 if v.AuxInt != 0 { 2674 break 2675 } 2676 x := v.Args[0] 2677 v.reset(OpAMD64TESTB) 2678 v.AddArg(x) 2679 v.AddArg(x) 2680 return true 2681 } 2682 return false 2683 } 2684 func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { 2685 b := v.Block 2686 _ = b 2687 // match: (CMPL x (MOVLconst [c])) 2688 // cond: 2689 // result: (CMPLconst x [c]) 2690 for { 2691 _ = v.Args[1] 2692 x := v.Args[0] 2693 v_1 := v.Args[1] 2694 if v_1.Op != OpAMD64MOVLconst { 2695 break 2696 } 2697 c := v_1.AuxInt 2698 v.reset(OpAMD64CMPLconst) 2699 v.AuxInt = c 2700 v.AddArg(x) 2701 return true 2702 } 2703 // match: (CMPL (MOVLconst [c]) x) 2704 // cond: 2705 // result: (InvertFlags (CMPLconst x [c])) 2706 for { 2707 _ = v.Args[1] 2708 v_0 := v.Args[0] 2709 if v_0.Op != OpAMD64MOVLconst { 2710 break 2711 } 2712 c := v_0.AuxInt 2713 x := v.Args[1] 2714 v.reset(OpAMD64InvertFlags) 2715 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 2716 v0.AuxInt = c 2717 v0.AddArg(x) 2718 v.AddArg(v0) 2719 return true 2720 } 2721 return false 2722 } 2723 func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool { 2724 // match: (CMPLconst (MOVLconst [x]) [y]) 2725 // cond: int32(x)==int32(y) 2726 // result: (FlagEQ) 2727 for { 2728 y := v.AuxInt 2729 v_0 := v.Args[0] 2730 if v_0.Op != OpAMD64MOVLconst { 2731 break 2732 } 2733 x := v_0.AuxInt 2734 if !(int32(x) == int32(y)) { 2735 break 2736 } 2737 v.reset(OpAMD64FlagEQ) 2738 return true 2739 } 2740 // match: (CMPLconst (MOVLconst [x]) [y]) 2741 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 2742 // result: (FlagLT_ULT) 2743 for { 2744 y := v.AuxInt 2745 v_0 := v.Args[0] 2746 if v_0.Op != OpAMD64MOVLconst { 2747 break 2748 } 2749 x := v_0.AuxInt 2750 if !(int32(x) < int32(y) && uint32(x) < 
uint32(y)) { 2751 break 2752 } 2753 v.reset(OpAMD64FlagLT_ULT) 2754 return true 2755 } 2756 // match: (CMPLconst (MOVLconst [x]) [y]) 2757 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 2758 // result: (FlagLT_UGT) 2759 for { 2760 y := v.AuxInt 2761 v_0 := v.Args[0] 2762 if v_0.Op != OpAMD64MOVLconst { 2763 break 2764 } 2765 x := v_0.AuxInt 2766 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 2767 break 2768 } 2769 v.reset(OpAMD64FlagLT_UGT) 2770 return true 2771 } 2772 // match: (CMPLconst (MOVLconst [x]) [y]) 2773 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 2774 // result: (FlagGT_ULT) 2775 for { 2776 y := v.AuxInt 2777 v_0 := v.Args[0] 2778 if v_0.Op != OpAMD64MOVLconst { 2779 break 2780 } 2781 x := v_0.AuxInt 2782 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 2783 break 2784 } 2785 v.reset(OpAMD64FlagGT_ULT) 2786 return true 2787 } 2788 // match: (CMPLconst (MOVLconst [x]) [y]) 2789 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 2790 // result: (FlagGT_UGT) 2791 for { 2792 y := v.AuxInt 2793 v_0 := v.Args[0] 2794 if v_0.Op != OpAMD64MOVLconst { 2795 break 2796 } 2797 x := v_0.AuxInt 2798 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 2799 break 2800 } 2801 v.reset(OpAMD64FlagGT_UGT) 2802 return true 2803 } 2804 // match: (CMPLconst (SHRLconst _ [c]) [n]) 2805 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 2806 // result: (FlagLT_ULT) 2807 for { 2808 n := v.AuxInt 2809 v_0 := v.Args[0] 2810 if v_0.Op != OpAMD64SHRLconst { 2811 break 2812 } 2813 c := v_0.AuxInt 2814 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 2815 break 2816 } 2817 v.reset(OpAMD64FlagLT_ULT) 2818 return true 2819 } 2820 // match: (CMPLconst (ANDLconst _ [m]) [n]) 2821 // cond: 0 <= int32(m) && int32(m) < int32(n) 2822 // result: (FlagLT_ULT) 2823 for { 2824 n := v.AuxInt 2825 v_0 := v.Args[0] 2826 if v_0.Op != OpAMD64ANDLconst { 2827 break 2828 } 2829 m := v_0.AuxInt 2830 if !(0 <= int32(m) && int32(m) < int32(n)) { 2831 break 2832 } 2833 v.reset(OpAMD64FlagLT_ULT) 2834 return true 2835 } 2836 // match: (CMPLconst (ANDL x y) [0]) 2837 // cond: 2838 // result: (TESTL x y) 2839 for { 2840 if v.AuxInt != 0 { 2841 break 2842 } 2843 v_0 := v.Args[0] 2844 if v_0.Op != OpAMD64ANDL { 2845 break 2846 } 2847 _ = v_0.Args[1] 2848 x := v_0.Args[0] 2849 y := v_0.Args[1] 2850 v.reset(OpAMD64TESTL) 2851 v.AddArg(x) 2852 v.AddArg(y) 2853 return true 2854 } 2855 // match: (CMPLconst (ANDLconst [c] x) [0]) 2856 // cond: 2857 // result: (TESTLconst [c] x) 2858 for { 2859 if v.AuxInt != 0 { 2860 break 2861 } 2862 v_0 := v.Args[0] 2863 if v_0.Op != OpAMD64ANDLconst { 2864 break 2865 } 2866 c := v_0.AuxInt 2867 x := v_0.Args[0] 2868 v.reset(OpAMD64TESTLconst) 2869 v.AuxInt = c 2870 v.AddArg(x) 2871 return true 2872 } 2873 // match: (CMPLconst x [0]) 2874 // cond: 2875 // result: (TESTL x x) 2876 for { 2877 if v.AuxInt != 0 { 2878 break 2879 } 2880 x := v.Args[0] 2881 v.reset(OpAMD64TESTL) 2882 v.AddArg(x) 2883 v.AddArg(x) 2884 return true 2885 } 2886 return false 2887 } 2888 func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { 2889 b := v.Block 2890 _ = b 2891 // match: (CMPQ x (MOVQconst [c])) 2892 // cond: is32Bit(c) 2893 // result: (CMPQconst x [c]) 2894 for { 2895 _ = v.Args[1] 2896 x := v.Args[0] 2897 v_1 := v.Args[1] 2898 if v_1.Op != OpAMD64MOVQconst { 2899 break 2900 } 2901 c := v_1.AuxInt 2902 if !(is32Bit(c)) { 2903 break 2904 } 2905 v.reset(OpAMD64CMPQconst) 2906 v.AuxInt = c 2907 v.AddArg(x) 2908 return true 2909 } 2910 // match: (CMPQ (MOVQconst [c]) 
x) 2911 // cond: is32Bit(c) 2912 // result: (InvertFlags (CMPQconst x [c])) 2913 for { 2914 _ = v.Args[1] 2915 v_0 := v.Args[0] 2916 if v_0.Op != OpAMD64MOVQconst { 2917 break 2918 } 2919 c := v_0.AuxInt 2920 x := v.Args[1] 2921 if !(is32Bit(c)) { 2922 break 2923 } 2924 v.reset(OpAMD64InvertFlags) 2925 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 2926 v0.AuxInt = c 2927 v0.AddArg(x) 2928 v.AddArg(v0) 2929 return true 2930 } 2931 return false 2932 } 2933 func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool { 2934 // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) 2935 // cond: 2936 // result: (FlagLT_ULT) 2937 for { 2938 if v.AuxInt != 32 { 2939 break 2940 } 2941 v_0 := v.Args[0] 2942 if v_0.Op != OpAMD64NEGQ { 2943 break 2944 } 2945 v_0_0 := v_0.Args[0] 2946 if v_0_0.Op != OpAMD64ADDQconst { 2947 break 2948 } 2949 if v_0_0.AuxInt != -16 { 2950 break 2951 } 2952 v_0_0_0 := v_0_0.Args[0] 2953 if v_0_0_0.Op != OpAMD64ANDQconst { 2954 break 2955 } 2956 if v_0_0_0.AuxInt != 15 { 2957 break 2958 } 2959 v.reset(OpAMD64FlagLT_ULT) 2960 return true 2961 } 2962 // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) 2963 // cond: 2964 // result: (FlagLT_ULT) 2965 for { 2966 if v.AuxInt != 32 { 2967 break 2968 } 2969 v_0 := v.Args[0] 2970 if v_0.Op != OpAMD64NEGQ { 2971 break 2972 } 2973 v_0_0 := v_0.Args[0] 2974 if v_0_0.Op != OpAMD64ADDQconst { 2975 break 2976 } 2977 if v_0_0.AuxInt != -8 { 2978 break 2979 } 2980 v_0_0_0 := v_0_0.Args[0] 2981 if v_0_0_0.Op != OpAMD64ANDQconst { 2982 break 2983 } 2984 if v_0_0_0.AuxInt != 7 { 2985 break 2986 } 2987 v.reset(OpAMD64FlagLT_ULT) 2988 return true 2989 } 2990 // match: (CMPQconst (MOVQconst [x]) [y]) 2991 // cond: x==y 2992 // result: (FlagEQ) 2993 for { 2994 y := v.AuxInt 2995 v_0 := v.Args[0] 2996 if v_0.Op != OpAMD64MOVQconst { 2997 break 2998 } 2999 x := v_0.AuxInt 3000 if !(x == y) { 3001 break 3002 } 3003 v.reset(OpAMD64FlagEQ) 3004 return true 3005 } 3006 // match: (CMPQconst (MOVQconst [x]) [y]) 3007 // cond: x<y && uint64(x)<uint64(y) 3008 // result: (FlagLT_ULT) 3009 for { 3010 y := v.AuxInt 3011 v_0 := v.Args[0] 3012 if v_0.Op != OpAMD64MOVQconst { 3013 break 3014 } 3015 x := v_0.AuxInt 3016 if !(x < y && uint64(x) < uint64(y)) { 3017 break 3018 } 3019 v.reset(OpAMD64FlagLT_ULT) 3020 return true 3021 } 3022 // match: (CMPQconst (MOVQconst [x]) [y]) 3023 // cond: x<y && uint64(x)>uint64(y) 3024 // result: (FlagLT_UGT) 3025 for { 3026 y := v.AuxInt 3027 v_0 := v.Args[0] 3028 if v_0.Op != OpAMD64MOVQconst { 3029 break 3030 } 3031 x := v_0.AuxInt 3032 if !(x < y && uint64(x) > uint64(y)) { 3033 break 3034 } 3035 v.reset(OpAMD64FlagLT_UGT) 3036 return true 3037 } 3038 // match: (CMPQconst (MOVQconst [x]) [y]) 3039 // cond: x>y && uint64(x)<uint64(y) 3040 // result: (FlagGT_ULT) 3041 for { 3042 y := v.AuxInt 3043 v_0 := v.Args[0] 3044 if v_0.Op != OpAMD64MOVQconst { 3045 break 3046 } 3047 x := v_0.AuxInt 3048 if !(x > y && uint64(x) < uint64(y)) { 3049 break 3050 } 3051 v.reset(OpAMD64FlagGT_ULT) 3052 return true 3053 } 3054 // match: (CMPQconst (MOVQconst [x]) [y]) 3055 // cond: x>y && uint64(x)>uint64(y) 3056 // result: (FlagGT_UGT) 3057 for { 3058 y := v.AuxInt 3059 v_0 := v.Args[0] 3060 if v_0.Op != OpAMD64MOVQconst { 3061 break 3062 } 3063 x := v_0.AuxInt 3064 if !(x > y && uint64(x) > uint64(y)) { 3065 break 3066 } 3067 v.reset(OpAMD64FlagGT_UGT) 3068 return true 3069 } 3070 // match: (CMPQconst (MOVBQZX _) [c]) 3071 // cond: 0xFF < c 3072 // result: (FlagLT_ULT) 3073 for { 3074 c := v.AuxInt 
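// Editorial note (an illustrative sketch, not part of the generated rules;
// the function name below is hypothetical). The (CMPQconst (MOVBQZX _) [c])
// rule in progress here folds the whole comparison to FlagLT_ULT because a
// zero-extended byte can never reach a constant above 0xFF:
//
//	func byteZXBelow(x uint8, c int64) bool {
//		// int64(x) <= 0xFF always holds, so whenever 0xFF < c the
//		// comparison is statically known to be (unsigned) less-than.
//		return int64(x) < c || c <= 0xFF // always true
//	}
//
// The MOVWQZX (0xFFFF) and MOVLQZX (0xFFFFFFFF) rules that follow use the
// same range argument with wider masks.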
3075 v_0 := v.Args[0] 3076 if v_0.Op != OpAMD64MOVBQZX { 3077 break 3078 } 3079 if !(0xFF < c) { 3080 break 3081 } 3082 v.reset(OpAMD64FlagLT_ULT) 3083 return true 3084 } 3085 // match: (CMPQconst (MOVWQZX _) [c]) 3086 // cond: 0xFFFF < c 3087 // result: (FlagLT_ULT) 3088 for { 3089 c := v.AuxInt 3090 v_0 := v.Args[0] 3091 if v_0.Op != OpAMD64MOVWQZX { 3092 break 3093 } 3094 if !(0xFFFF < c) { 3095 break 3096 } 3097 v.reset(OpAMD64FlagLT_ULT) 3098 return true 3099 } 3100 // match: (CMPQconst (MOVLQZX _) [c]) 3101 // cond: 0xFFFFFFFF < c 3102 // result: (FlagLT_ULT) 3103 for { 3104 c := v.AuxInt 3105 v_0 := v.Args[0] 3106 if v_0.Op != OpAMD64MOVLQZX { 3107 break 3108 } 3109 if !(0xFFFFFFFF < c) { 3110 break 3111 } 3112 v.reset(OpAMD64FlagLT_ULT) 3113 return true 3114 } 3115 return false 3116 } 3117 func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool { 3118 // match: (CMPQconst (SHRQconst _ [c]) [n]) 3119 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 3120 // result: (FlagLT_ULT) 3121 for { 3122 n := v.AuxInt 3123 v_0 := v.Args[0] 3124 if v_0.Op != OpAMD64SHRQconst { 3125 break 3126 } 3127 c := v_0.AuxInt 3128 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 3129 break 3130 } 3131 v.reset(OpAMD64FlagLT_ULT) 3132 return true 3133 } 3134 // match: (CMPQconst (ANDQconst _ [m]) [n]) 3135 // cond: 0 <= m && m < n 3136 // result: (FlagLT_ULT) 3137 for { 3138 n := v.AuxInt 3139 v_0 := v.Args[0] 3140 if v_0.Op != OpAMD64ANDQconst { 3141 break 3142 } 3143 m := v_0.AuxInt 3144 if !(0 <= m && m < n) { 3145 break 3146 } 3147 v.reset(OpAMD64FlagLT_ULT) 3148 return true 3149 } 3150 // match: (CMPQconst (ANDLconst _ [m]) [n]) 3151 // cond: 0 <= m && m < n 3152 // result: (FlagLT_ULT) 3153 for { 3154 n := v.AuxInt 3155 v_0 := v.Args[0] 3156 if v_0.Op != OpAMD64ANDLconst { 3157 break 3158 } 3159 m := v_0.AuxInt 3160 if !(0 <= m && m < n) { 3161 break 3162 } 3163 v.reset(OpAMD64FlagLT_ULT) 3164 return true 3165 } 3166 // match: (CMPQconst (ANDQ x y) [0]) 3167 // cond: 3168 // result: (TESTQ x y) 3169 for { 3170 if v.AuxInt != 0 { 3171 break 3172 } 3173 v_0 := v.Args[0] 3174 if v_0.Op != OpAMD64ANDQ { 3175 break 3176 } 3177 _ = v_0.Args[1] 3178 x := v_0.Args[0] 3179 y := v_0.Args[1] 3180 v.reset(OpAMD64TESTQ) 3181 v.AddArg(x) 3182 v.AddArg(y) 3183 return true 3184 } 3185 // match: (CMPQconst (ANDQconst [c] x) [0]) 3186 // cond: 3187 // result: (TESTQconst [c] x) 3188 for { 3189 if v.AuxInt != 0 { 3190 break 3191 } 3192 v_0 := v.Args[0] 3193 if v_0.Op != OpAMD64ANDQconst { 3194 break 3195 } 3196 c := v_0.AuxInt 3197 x := v_0.Args[0] 3198 v.reset(OpAMD64TESTQconst) 3199 v.AuxInt = c 3200 v.AddArg(x) 3201 return true 3202 } 3203 // match: (CMPQconst x [0]) 3204 // cond: 3205 // result: (TESTQ x x) 3206 for { 3207 if v.AuxInt != 0 { 3208 break 3209 } 3210 x := v.Args[0] 3211 v.reset(OpAMD64TESTQ) 3212 v.AddArg(x) 3213 v.AddArg(x) 3214 return true 3215 } 3216 return false 3217 } 3218 func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { 3219 b := v.Block 3220 _ = b 3221 // match: (CMPW x (MOVLconst [c])) 3222 // cond: 3223 // result: (CMPWconst x [int64(int16(c))]) 3224 for { 3225 _ = v.Args[1] 3226 x := v.Args[0] 3227 v_1 := v.Args[1] 3228 if v_1.Op != OpAMD64MOVLconst { 3229 break 3230 } 3231 c := v_1.AuxInt 3232 v.reset(OpAMD64CMPWconst) 3233 v.AuxInt = int64(int16(c)) 3234 v.AddArg(x) 3235 return true 3236 } 3237 // match: (CMPW (MOVLconst [c]) x) 3238 // cond: 3239 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 3240 for { 3241 _ = v.Args[1] 3242 v_0 := 
v.Args[0] 3243 if v_0.Op != OpAMD64MOVLconst { 3244 break 3245 } 3246 c := v_0.AuxInt 3247 x := v.Args[1] 3248 v.reset(OpAMD64InvertFlags) 3249 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 3250 v0.AuxInt = int64(int16(c)) 3251 v0.AddArg(x) 3252 v.AddArg(v0) 3253 return true 3254 } 3255 return false 3256 } 3257 func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool { 3258 // match: (CMPWconst (MOVLconst [x]) [y]) 3259 // cond: int16(x)==int16(y) 3260 // result: (FlagEQ) 3261 for { 3262 y := v.AuxInt 3263 v_0 := v.Args[0] 3264 if v_0.Op != OpAMD64MOVLconst { 3265 break 3266 } 3267 x := v_0.AuxInt 3268 if !(int16(x) == int16(y)) { 3269 break 3270 } 3271 v.reset(OpAMD64FlagEQ) 3272 return true 3273 } 3274 // match: (CMPWconst (MOVLconst [x]) [y]) 3275 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 3276 // result: (FlagLT_ULT) 3277 for { 3278 y := v.AuxInt 3279 v_0 := v.Args[0] 3280 if v_0.Op != OpAMD64MOVLconst { 3281 break 3282 } 3283 x := v_0.AuxInt 3284 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 3285 break 3286 } 3287 v.reset(OpAMD64FlagLT_ULT) 3288 return true 3289 } 3290 // match: (CMPWconst (MOVLconst [x]) [y]) 3291 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 3292 // result: (FlagLT_UGT) 3293 for { 3294 y := v.AuxInt 3295 v_0 := v.Args[0] 3296 if v_0.Op != OpAMD64MOVLconst { 3297 break 3298 } 3299 x := v_0.AuxInt 3300 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 3301 break 3302 } 3303 v.reset(OpAMD64FlagLT_UGT) 3304 return true 3305 } 3306 // match: (CMPWconst (MOVLconst [x]) [y]) 3307 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 3308 // result: (FlagGT_ULT) 3309 for { 3310 y := v.AuxInt 3311 v_0 := v.Args[0] 3312 if v_0.Op != OpAMD64MOVLconst { 3313 break 3314 } 3315 x := v_0.AuxInt 3316 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 3317 break 3318 } 3319 v.reset(OpAMD64FlagGT_ULT) 3320 return true 3321 } 3322 // match: (CMPWconst (MOVLconst [x]) [y]) 3323 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 3324 // result: (FlagGT_UGT) 3325 for { 3326 y := v.AuxInt 3327 v_0 := v.Args[0] 3328 if v_0.Op != OpAMD64MOVLconst { 3329 break 3330 } 3331 x := v_0.AuxInt 3332 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 3333 break 3334 } 3335 v.reset(OpAMD64FlagGT_UGT) 3336 return true 3337 } 3338 // match: (CMPWconst (ANDLconst _ [m]) [n]) 3339 // cond: 0 <= int16(m) && int16(m) < int16(n) 3340 // result: (FlagLT_ULT) 3341 for { 3342 n := v.AuxInt 3343 v_0 := v.Args[0] 3344 if v_0.Op != OpAMD64ANDLconst { 3345 break 3346 } 3347 m := v_0.AuxInt 3348 if !(0 <= int16(m) && int16(m) < int16(n)) { 3349 break 3350 } 3351 v.reset(OpAMD64FlagLT_ULT) 3352 return true 3353 } 3354 // match: (CMPWconst (ANDL x y) [0]) 3355 // cond: 3356 // result: (TESTW x y) 3357 for { 3358 if v.AuxInt != 0 { 3359 break 3360 } 3361 v_0 := v.Args[0] 3362 if v_0.Op != OpAMD64ANDL { 3363 break 3364 } 3365 _ = v_0.Args[1] 3366 x := v_0.Args[0] 3367 y := v_0.Args[1] 3368 v.reset(OpAMD64TESTW) 3369 v.AddArg(x) 3370 v.AddArg(y) 3371 return true 3372 } 3373 // match: (CMPWconst (ANDLconst [c] x) [0]) 3374 // cond: 3375 // result: (TESTWconst [int64(int16(c))] x) 3376 for { 3377 if v.AuxInt != 0 { 3378 break 3379 } 3380 v_0 := v.Args[0] 3381 if v_0.Op != OpAMD64ANDLconst { 3382 break 3383 } 3384 c := v_0.AuxInt 3385 x := v_0.Args[0] 3386 v.reset(OpAMD64TESTWconst) 3387 v.AuxInt = int64(int16(c)) 3388 v.AddArg(x) 3389 return true 3390 } 3391 // match: (CMPWconst x [0]) 3392 // cond: 3393 // result: (TESTW x x) 3394 for { 3395 if v.AuxInt != 0 { 3396 break 3397 } 3398 x := 
v.Args[0] 3399 v.reset(OpAMD64TESTW) 3400 v.AddArg(x) 3401 v.AddArg(x) 3402 return true 3403 } 3404 return false 3405 } 3406 func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool { 3407 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 3408 // cond: is32Bit(off1+off2) 3409 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 3410 for { 3411 off1 := v.AuxInt 3412 sym := v.Aux 3413 _ = v.Args[3] 3414 v_0 := v.Args[0] 3415 if v_0.Op != OpAMD64ADDQconst { 3416 break 3417 } 3418 off2 := v_0.AuxInt 3419 ptr := v_0.Args[0] 3420 old := v.Args[1] 3421 new_ := v.Args[2] 3422 mem := v.Args[3] 3423 if !(is32Bit(off1 + off2)) { 3424 break 3425 } 3426 v.reset(OpAMD64CMPXCHGLlock) 3427 v.AuxInt = off1 + off2 3428 v.Aux = sym 3429 v.AddArg(ptr) 3430 v.AddArg(old) 3431 v.AddArg(new_) 3432 v.AddArg(mem) 3433 return true 3434 } 3435 return false 3436 } 3437 func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool { 3438 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 3439 // cond: is32Bit(off1+off2) 3440 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 3441 for { 3442 off1 := v.AuxInt 3443 sym := v.Aux 3444 _ = v.Args[3] 3445 v_0 := v.Args[0] 3446 if v_0.Op != OpAMD64ADDQconst { 3447 break 3448 } 3449 off2 := v_0.AuxInt 3450 ptr := v_0.Args[0] 3451 old := v.Args[1] 3452 new_ := v.Args[2] 3453 mem := v.Args[3] 3454 if !(is32Bit(off1 + off2)) { 3455 break 3456 } 3457 v.reset(OpAMD64CMPXCHGQlock) 3458 v.AuxInt = off1 + off2 3459 v.Aux = sym 3460 v.AddArg(ptr) 3461 v.AddArg(old) 3462 v.AddArg(new_) 3463 v.AddArg(mem) 3464 return true 3465 } 3466 return false 3467 } 3468 func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool { 3469 // match: (LEAL [c] {s} (ADDLconst [d] x)) 3470 // cond: is32Bit(c+d) 3471 // result: (LEAL [c+d] {s} x) 3472 for { 3473 c := v.AuxInt 3474 s := v.Aux 3475 v_0 := v.Args[0] 3476 if v_0.Op != OpAMD64ADDLconst { 3477 break 3478 } 3479 d := v_0.AuxInt 3480 x := v_0.Args[0] 3481 if !(is32Bit(c + d)) { 3482 break 3483 } 3484 v.reset(OpAMD64LEAL) 3485 v.AuxInt = c + d 3486 v.Aux = s 3487 v.AddArg(x) 3488 return true 3489 } 3490 return false 3491 } 3492 func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool { 3493 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 3494 // cond: is32Bit(c+d) 3495 // result: (LEAQ [c+d] {s} x) 3496 for { 3497 c := v.AuxInt 3498 s := v.Aux 3499 v_0 := v.Args[0] 3500 if v_0.Op != OpAMD64ADDQconst { 3501 break 3502 } 3503 d := v_0.AuxInt 3504 x := v_0.Args[0] 3505 if !(is32Bit(c + d)) { 3506 break 3507 } 3508 v.reset(OpAMD64LEAQ) 3509 v.AuxInt = c + d 3510 v.Aux = s 3511 v.AddArg(x) 3512 return true 3513 } 3514 // match: (LEAQ [c] {s} (ADDQ x y)) 3515 // cond: x.Op != OpSB && y.Op != OpSB 3516 // result: (LEAQ1 [c] {s} x y) 3517 for { 3518 c := v.AuxInt 3519 s := v.Aux 3520 v_0 := v.Args[0] 3521 if v_0.Op != OpAMD64ADDQ { 3522 break 3523 } 3524 _ = v_0.Args[1] 3525 x := v_0.Args[0] 3526 y := v_0.Args[1] 3527 if !(x.Op != OpSB && y.Op != OpSB) { 3528 break 3529 } 3530 v.reset(OpAMD64LEAQ1) 3531 v.AuxInt = c 3532 v.Aux = s 3533 v.AddArg(x) 3534 v.AddArg(y) 3535 return true 3536 } 3537 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 3538 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3539 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 3540 for { 3541 off1 := v.AuxInt 3542 sym1 := v.Aux 3543 v_0 := v.Args[0] 3544 if v_0.Op != OpAMD64LEAQ { 3545 break 3546 } 3547 off2 := v_0.AuxInt 3548 sym2 := v_0.Aux 3549 x := v_0.Args[0] 3550 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 
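// Editorial note (an illustrative sketch, not part of the generated rules;
// the function name is hypothetical). This family of LEAQ-folding rules
// rests on plain address arithmetic: merging an inner displacement into the
// outer one is just associativity of addition, with is32Bit guarding that
// the sum still fits the signed 32-bit displacement field of an x86-64
// addressing mode, and canMergeSym checking that the two symbol annotations
// can be combined:
//
//	func foldDisplacement(base, off1, off2 int64) bool {
//		return off1+(base+off2) == base+(off1+off2) // always true
//	}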
3551 break 3552 } 3553 v.reset(OpAMD64LEAQ) 3554 v.AuxInt = off1 + off2 3555 v.Aux = mergeSym(sym1, sym2) 3556 v.AddArg(x) 3557 return true 3558 } 3559 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 3560 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3561 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3562 for { 3563 off1 := v.AuxInt 3564 sym1 := v.Aux 3565 v_0 := v.Args[0] 3566 if v_0.Op != OpAMD64LEAQ1 { 3567 break 3568 } 3569 off2 := v_0.AuxInt 3570 sym2 := v_0.Aux 3571 _ = v_0.Args[1] 3572 x := v_0.Args[0] 3573 y := v_0.Args[1] 3574 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3575 break 3576 } 3577 v.reset(OpAMD64LEAQ1) 3578 v.AuxInt = off1 + off2 3579 v.Aux = mergeSym(sym1, sym2) 3580 v.AddArg(x) 3581 v.AddArg(y) 3582 return true 3583 } 3584 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 3585 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3586 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3587 for { 3588 off1 := v.AuxInt 3589 sym1 := v.Aux 3590 v_0 := v.Args[0] 3591 if v_0.Op != OpAMD64LEAQ2 { 3592 break 3593 } 3594 off2 := v_0.AuxInt 3595 sym2 := v_0.Aux 3596 _ = v_0.Args[1] 3597 x := v_0.Args[0] 3598 y := v_0.Args[1] 3599 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3600 break 3601 } 3602 v.reset(OpAMD64LEAQ2) 3603 v.AuxInt = off1 + off2 3604 v.Aux = mergeSym(sym1, sym2) 3605 v.AddArg(x) 3606 v.AddArg(y) 3607 return true 3608 } 3609 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 3610 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3611 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3612 for { 3613 off1 := v.AuxInt 3614 sym1 := v.Aux 3615 v_0 := v.Args[0] 3616 if v_0.Op != OpAMD64LEAQ4 { 3617 break 3618 } 3619 off2 := v_0.AuxInt 3620 sym2 := v_0.Aux 3621 _ = v_0.Args[1] 3622 x := v_0.Args[0] 3623 y := v_0.Args[1] 3624 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3625 break 3626 } 3627 v.reset(OpAMD64LEAQ4) 3628 v.AuxInt = off1 + off2 3629 v.Aux = mergeSym(sym1, sym2) 3630 v.AddArg(x) 3631 v.AddArg(y) 3632 return true 3633 } 3634 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 3635 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3636 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 3637 for { 3638 off1 := v.AuxInt 3639 sym1 := v.Aux 3640 v_0 := v.Args[0] 3641 if v_0.Op != OpAMD64LEAQ8 { 3642 break 3643 } 3644 off2 := v_0.AuxInt 3645 sym2 := v_0.Aux 3646 _ = v_0.Args[1] 3647 x := v_0.Args[0] 3648 y := v_0.Args[1] 3649 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3650 break 3651 } 3652 v.reset(OpAMD64LEAQ8) 3653 v.AuxInt = off1 + off2 3654 v.Aux = mergeSym(sym1, sym2) 3655 v.AddArg(x) 3656 v.AddArg(y) 3657 return true 3658 } 3659 return false 3660 } 3661 func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { 3662 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 3663 // cond: is32Bit(c+d) && x.Op != OpSB 3664 // result: (LEAQ1 [c+d] {s} x y) 3665 for { 3666 c := v.AuxInt 3667 s := v.Aux 3668 _ = v.Args[1] 3669 v_0 := v.Args[0] 3670 if v_0.Op != OpAMD64ADDQconst { 3671 break 3672 } 3673 d := v_0.AuxInt 3674 x := v_0.Args[0] 3675 y := v.Args[1] 3676 if !(is32Bit(c+d) && x.Op != OpSB) { 3677 break 3678 } 3679 v.reset(OpAMD64LEAQ1) 3680 v.AuxInt = c + d 3681 v.Aux = s 3682 v.AddArg(x) 3683 v.AddArg(y) 3684 return true 3685 } 3686 // match: (LEAQ1 [c] {s} y (ADDQconst [d] x)) 3687 // cond: is32Bit(c+d) && x.Op != OpSB 3688 // result: (LEAQ1 [c+d] {s} x y) 3689 for { 3690 c := v.AuxInt 3691 s := v.Aux 3692 _ = v.Args[1] 3693 y := v.Args[0] 3694 v_1 := 
v.Args[1] 3695 if v_1.Op != OpAMD64ADDQconst { 3696 break 3697 } 3698 d := v_1.AuxInt 3699 x := v_1.Args[0] 3700 if !(is32Bit(c+d) && x.Op != OpSB) { 3701 break 3702 } 3703 v.reset(OpAMD64LEAQ1) 3704 v.AuxInt = c + d 3705 v.Aux = s 3706 v.AddArg(x) 3707 v.AddArg(y) 3708 return true 3709 } 3710 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 3711 // cond: 3712 // result: (LEAQ2 [c] {s} x y) 3713 for { 3714 c := v.AuxInt 3715 s := v.Aux 3716 _ = v.Args[1] 3717 x := v.Args[0] 3718 v_1 := v.Args[1] 3719 if v_1.Op != OpAMD64SHLQconst { 3720 break 3721 } 3722 if v_1.AuxInt != 1 { 3723 break 3724 } 3725 y := v_1.Args[0] 3726 v.reset(OpAMD64LEAQ2) 3727 v.AuxInt = c 3728 v.Aux = s 3729 v.AddArg(x) 3730 v.AddArg(y) 3731 return true 3732 } 3733 // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x) 3734 // cond: 3735 // result: (LEAQ2 [c] {s} x y) 3736 for { 3737 c := v.AuxInt 3738 s := v.Aux 3739 _ = v.Args[1] 3740 v_0 := v.Args[0] 3741 if v_0.Op != OpAMD64SHLQconst { 3742 break 3743 } 3744 if v_0.AuxInt != 1 { 3745 break 3746 } 3747 y := v_0.Args[0] 3748 x := v.Args[1] 3749 v.reset(OpAMD64LEAQ2) 3750 v.AuxInt = c 3751 v.Aux = s 3752 v.AddArg(x) 3753 v.AddArg(y) 3754 return true 3755 } 3756 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 3757 // cond: 3758 // result: (LEAQ4 [c] {s} x y) 3759 for { 3760 c := v.AuxInt 3761 s := v.Aux 3762 _ = v.Args[1] 3763 x := v.Args[0] 3764 v_1 := v.Args[1] 3765 if v_1.Op != OpAMD64SHLQconst { 3766 break 3767 } 3768 if v_1.AuxInt != 2 { 3769 break 3770 } 3771 y := v_1.Args[0] 3772 v.reset(OpAMD64LEAQ4) 3773 v.AuxInt = c 3774 v.Aux = s 3775 v.AddArg(x) 3776 v.AddArg(y) 3777 return true 3778 } 3779 // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x) 3780 // cond: 3781 // result: (LEAQ4 [c] {s} x y) 3782 for { 3783 c := v.AuxInt 3784 s := v.Aux 3785 _ = v.Args[1] 3786 v_0 := v.Args[0] 3787 if v_0.Op != OpAMD64SHLQconst { 3788 break 3789 } 3790 if v_0.AuxInt != 2 { 3791 break 3792 } 3793 y := v_0.Args[0] 3794 x := v.Args[1] 3795 v.reset(OpAMD64LEAQ4) 3796 v.AuxInt = c 3797 v.Aux = s 3798 v.AddArg(x) 3799 v.AddArg(y) 3800 return true 3801 } 3802 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 3803 // cond: 3804 // result: (LEAQ8 [c] {s} x y) 3805 for { 3806 c := v.AuxInt 3807 s := v.Aux 3808 _ = v.Args[1] 3809 x := v.Args[0] 3810 v_1 := v.Args[1] 3811 if v_1.Op != OpAMD64SHLQconst { 3812 break 3813 } 3814 if v_1.AuxInt != 3 { 3815 break 3816 } 3817 y := v_1.Args[0] 3818 v.reset(OpAMD64LEAQ8) 3819 v.AuxInt = c 3820 v.Aux = s 3821 v.AddArg(x) 3822 v.AddArg(y) 3823 return true 3824 } 3825 // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x) 3826 // cond: 3827 // result: (LEAQ8 [c] {s} x y) 3828 for { 3829 c := v.AuxInt 3830 s := v.Aux 3831 _ = v.Args[1] 3832 v_0 := v.Args[0] 3833 if v_0.Op != OpAMD64SHLQconst { 3834 break 3835 } 3836 if v_0.AuxInt != 3 { 3837 break 3838 } 3839 y := v_0.Args[0] 3840 x := v.Args[1] 3841 v.reset(OpAMD64LEAQ8) 3842 v.AuxInt = c 3843 v.Aux = s 3844 v.AddArg(x) 3845 v.AddArg(y) 3846 return true 3847 } 3848 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3849 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3850 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3851 for { 3852 off1 := v.AuxInt 3853 sym1 := v.Aux 3854 _ = v.Args[1] 3855 v_0 := v.Args[0] 3856 if v_0.Op != OpAMD64LEAQ { 3857 break 3858 } 3859 off2 := v_0.AuxInt 3860 sym2 := v_0.Aux 3861 x := v_0.Args[0] 3862 y := v.Args[1] 3863 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3864 break 3865 } 3866 v.reset(OpAMD64LEAQ1) 3867 v.AuxInt = off1 + off2 3868 
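// Editorial note (an illustrative sketch, not part of the generated rules;
// the function name is hypothetical). The SHLQconst rules above turn
// x + (y << k), for k = 1, 2, 3, into the LEAQ2/LEAQ4/LEAQ8 forms: a left
// shift by k is multiplication by 2^k, exactly the scale factors that x86
// indexed addressing supports:
//
//	func shiftIsScale(x, y int64) bool {
//		return x+(y<<3) == x+8*y // likewise <<1 is *2 and <<2 is *4
//	}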
v.Aux = mergeSym(sym1, sym2) 3869 v.AddArg(x) 3870 v.AddArg(y) 3871 return true 3872 } 3873 // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x)) 3874 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3875 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3876 for { 3877 off1 := v.AuxInt 3878 sym1 := v.Aux 3879 _ = v.Args[1] 3880 y := v.Args[0] 3881 v_1 := v.Args[1] 3882 if v_1.Op != OpAMD64LEAQ { 3883 break 3884 } 3885 off2 := v_1.AuxInt 3886 sym2 := v_1.Aux 3887 x := v_1.Args[0] 3888 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3889 break 3890 } 3891 v.reset(OpAMD64LEAQ1) 3892 v.AuxInt = off1 + off2 3893 v.Aux = mergeSym(sym1, sym2) 3894 v.AddArg(x) 3895 v.AddArg(y) 3896 return true 3897 } 3898 return false 3899 } 3900 func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool { 3901 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 3902 // cond: is32Bit(c+d) && x.Op != OpSB 3903 // result: (LEAQ2 [c+d] {s} x y) 3904 for { 3905 c := v.AuxInt 3906 s := v.Aux 3907 _ = v.Args[1] 3908 v_0 := v.Args[0] 3909 if v_0.Op != OpAMD64ADDQconst { 3910 break 3911 } 3912 d := v_0.AuxInt 3913 x := v_0.Args[0] 3914 y := v.Args[1] 3915 if !(is32Bit(c+d) && x.Op != OpSB) { 3916 break 3917 } 3918 v.reset(OpAMD64LEAQ2) 3919 v.AuxInt = c + d 3920 v.Aux = s 3921 v.AddArg(x) 3922 v.AddArg(y) 3923 return true 3924 } 3925 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 3926 // cond: is32Bit(c+2*d) && y.Op != OpSB 3927 // result: (LEAQ2 [c+2*d] {s} x y) 3928 for { 3929 c := v.AuxInt 3930 s := v.Aux 3931 _ = v.Args[1] 3932 x := v.Args[0] 3933 v_1 := v.Args[1] 3934 if v_1.Op != OpAMD64ADDQconst { 3935 break 3936 } 3937 d := v_1.AuxInt 3938 y := v_1.Args[0] 3939 if !(is32Bit(c+2*d) && y.Op != OpSB) { 3940 break 3941 } 3942 v.reset(OpAMD64LEAQ2) 3943 v.AuxInt = c + 2*d 3944 v.Aux = s 3945 v.AddArg(x) 3946 v.AddArg(y) 3947 return true 3948 } 3949 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 3950 // cond: 3951 // result: (LEAQ4 [c] {s} x y) 3952 for { 3953 c := v.AuxInt 3954 s := v.Aux 3955 _ = v.Args[1] 3956 x := v.Args[0] 3957 v_1 := v.Args[1] 3958 if v_1.Op != OpAMD64SHLQconst { 3959 break 3960 } 3961 if v_1.AuxInt != 1 { 3962 break 3963 } 3964 y := v_1.Args[0] 3965 v.reset(OpAMD64LEAQ4) 3966 v.AuxInt = c 3967 v.Aux = s 3968 v.AddArg(x) 3969 v.AddArg(y) 3970 return true 3971 } 3972 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 3973 // cond: 3974 // result: (LEAQ8 [c] {s} x y) 3975 for { 3976 c := v.AuxInt 3977 s := v.Aux 3978 _ = v.Args[1] 3979 x := v.Args[0] 3980 v_1 := v.Args[1] 3981 if v_1.Op != OpAMD64SHLQconst { 3982 break 3983 } 3984 if v_1.AuxInt != 2 { 3985 break 3986 } 3987 y := v_1.Args[0] 3988 v.reset(OpAMD64LEAQ8) 3989 v.AuxInt = c 3990 v.Aux = s 3991 v.AddArg(x) 3992 v.AddArg(y) 3993 return true 3994 } 3995 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3996 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3997 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3998 for { 3999 off1 := v.AuxInt 4000 sym1 := v.Aux 4001 _ = v.Args[1] 4002 v_0 := v.Args[0] 4003 if v_0.Op != OpAMD64LEAQ { 4004 break 4005 } 4006 off2 := v_0.AuxInt 4007 sym2 := v_0.Aux 4008 x := v_0.Args[0] 4009 y := v.Args[1] 4010 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4011 break 4012 } 4013 v.reset(OpAMD64LEAQ2) 4014 v.AuxInt = off1 + off2 4015 v.Aux = mergeSym(sym1, sym2) 4016 v.AddArg(x) 4017 v.AddArg(y) 4018 return true 4019 } 4020 return false 4021 } 4022 func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool { 4023 // match: 
(LEAQ4 [c] {s} (ADDQconst [d] x) y) 4024 // cond: is32Bit(c+d) && x.Op != OpSB 4025 // result: (LEAQ4 [c+d] {s} x y) 4026 for { 4027 c := v.AuxInt 4028 s := v.Aux 4029 _ = v.Args[1] 4030 v_0 := v.Args[0] 4031 if v_0.Op != OpAMD64ADDQconst { 4032 break 4033 } 4034 d := v_0.AuxInt 4035 x := v_0.Args[0] 4036 y := v.Args[1] 4037 if !(is32Bit(c+d) && x.Op != OpSB) { 4038 break 4039 } 4040 v.reset(OpAMD64LEAQ4) 4041 v.AuxInt = c + d 4042 v.Aux = s 4043 v.AddArg(x) 4044 v.AddArg(y) 4045 return true 4046 } 4047 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 4048 // cond: is32Bit(c+4*d) && y.Op != OpSB 4049 // result: (LEAQ4 [c+4*d] {s} x y) 4050 for { 4051 c := v.AuxInt 4052 s := v.Aux 4053 _ = v.Args[1] 4054 x := v.Args[0] 4055 v_1 := v.Args[1] 4056 if v_1.Op != OpAMD64ADDQconst { 4057 break 4058 } 4059 d := v_1.AuxInt 4060 y := v_1.Args[0] 4061 if !(is32Bit(c+4*d) && y.Op != OpSB) { 4062 break 4063 } 4064 v.reset(OpAMD64LEAQ4) 4065 v.AuxInt = c + 4*d 4066 v.Aux = s 4067 v.AddArg(x) 4068 v.AddArg(y) 4069 return true 4070 } 4071 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 4072 // cond: 4073 // result: (LEAQ8 [c] {s} x y) 4074 for { 4075 c := v.AuxInt 4076 s := v.Aux 4077 _ = v.Args[1] 4078 x := v.Args[0] 4079 v_1 := v.Args[1] 4080 if v_1.Op != OpAMD64SHLQconst { 4081 break 4082 } 4083 if v_1.AuxInt != 1 { 4084 break 4085 } 4086 y := v_1.Args[0] 4087 v.reset(OpAMD64LEAQ8) 4088 v.AuxInt = c 4089 v.Aux = s 4090 v.AddArg(x) 4091 v.AddArg(y) 4092 return true 4093 } 4094 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4095 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4096 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 4097 for { 4098 off1 := v.AuxInt 4099 sym1 := v.Aux 4100 _ = v.Args[1] 4101 v_0 := v.Args[0] 4102 if v_0.Op != OpAMD64LEAQ { 4103 break 4104 } 4105 off2 := v_0.AuxInt 4106 sym2 := v_0.Aux 4107 x := v_0.Args[0] 4108 y := v.Args[1] 4109 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4110 break 4111 } 4112 v.reset(OpAMD64LEAQ4) 4113 v.AuxInt = off1 + off2 4114 v.Aux = mergeSym(sym1, sym2) 4115 v.AddArg(x) 4116 v.AddArg(y) 4117 return true 4118 } 4119 return false 4120 } 4121 func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool { 4122 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 4123 // cond: is32Bit(c+d) && x.Op != OpSB 4124 // result: (LEAQ8 [c+d] {s} x y) 4125 for { 4126 c := v.AuxInt 4127 s := v.Aux 4128 _ = v.Args[1] 4129 v_0 := v.Args[0] 4130 if v_0.Op != OpAMD64ADDQconst { 4131 break 4132 } 4133 d := v_0.AuxInt 4134 x := v_0.Args[0] 4135 y := v.Args[1] 4136 if !(is32Bit(c+d) && x.Op != OpSB) { 4137 break 4138 } 4139 v.reset(OpAMD64LEAQ8) 4140 v.AuxInt = c + d 4141 v.Aux = s 4142 v.AddArg(x) 4143 v.AddArg(y) 4144 return true 4145 } 4146 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 4147 // cond: is32Bit(c+8*d) && y.Op != OpSB 4148 // result: (LEAQ8 [c+8*d] {s} x y) 4149 for { 4150 c := v.AuxInt 4151 s := v.Aux 4152 _ = v.Args[1] 4153 x := v.Args[0] 4154 v_1 := v.Args[1] 4155 if v_1.Op != OpAMD64ADDQconst { 4156 break 4157 } 4158 d := v_1.AuxInt 4159 y := v_1.Args[0] 4160 if !(is32Bit(c+8*d) && y.Op != OpSB) { 4161 break 4162 } 4163 v.reset(OpAMD64LEAQ8) 4164 v.AuxInt = c + 8*d 4165 v.Aux = s 4166 v.AddArg(x) 4167 v.AddArg(y) 4168 return true 4169 } 4170 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4171 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4172 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 4173 for { 4174 off1 := v.AuxInt 4175 sym1 := v.Aux 4176 _ = v.Args[1] 4177 
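// Editorial note (an illustrative sketch, not part of the generated rules;
// the function name is hypothetical). Note the asymmetry in the scaled
// forms: an ADDQconst folded out of the index operand of LEAQ2/LEAQ4/LEAQ8
// enters the displacement multiplied by the scale, because the index is
// scaled before being added:
//
//	func scaledIndexFold(x, y, d int64) bool {
//		return x+8*(y+d) == (x+8*d)+8*y // hence the c+8*d displacement above
//	}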
v_0 := v.Args[0] 4178 if v_0.Op != OpAMD64LEAQ { 4179 break 4180 } 4181 off2 := v_0.AuxInt 4182 sym2 := v_0.Aux 4183 x := v_0.Args[0] 4184 y := v.Args[1] 4185 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4186 break 4187 } 4188 v.reset(OpAMD64LEAQ8) 4189 v.AuxInt = off1 + off2 4190 v.Aux = mergeSym(sym1, sym2) 4191 v.AddArg(x) 4192 v.AddArg(y) 4193 return true 4194 } 4195 return false 4196 } 4197 func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { 4198 b := v.Block 4199 _ = b 4200 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 4201 // cond: x.Uses == 1 && clobber(x) 4202 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4203 for { 4204 x := v.Args[0] 4205 if x.Op != OpAMD64MOVBload { 4206 break 4207 } 4208 off := x.AuxInt 4209 sym := x.Aux 4210 _ = x.Args[1] 4211 ptr := x.Args[0] 4212 mem := x.Args[1] 4213 if !(x.Uses == 1 && clobber(x)) { 4214 break 4215 } 4216 b = x.Block 4217 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4218 v.reset(OpCopy) 4219 v.AddArg(v0) 4220 v0.AuxInt = off 4221 v0.Aux = sym 4222 v0.AddArg(ptr) 4223 v0.AddArg(mem) 4224 return true 4225 } 4226 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 4227 // cond: x.Uses == 1 && clobber(x) 4228 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4229 for { 4230 x := v.Args[0] 4231 if x.Op != OpAMD64MOVWload { 4232 break 4233 } 4234 off := x.AuxInt 4235 sym := x.Aux 4236 _ = x.Args[1] 4237 ptr := x.Args[0] 4238 mem := x.Args[1] 4239 if !(x.Uses == 1 && clobber(x)) { 4240 break 4241 } 4242 b = x.Block 4243 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4244 v.reset(OpCopy) 4245 v.AddArg(v0) 4246 v0.AuxInt = off 4247 v0.Aux = sym 4248 v0.AddArg(ptr) 4249 v0.AddArg(mem) 4250 return true 4251 } 4252 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) 4253 // cond: x.Uses == 1 && clobber(x) 4254 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4255 for { 4256 x := v.Args[0] 4257 if x.Op != OpAMD64MOVLload { 4258 break 4259 } 4260 off := x.AuxInt 4261 sym := x.Aux 4262 _ = x.Args[1] 4263 ptr := x.Args[0] 4264 mem := x.Args[1] 4265 if !(x.Uses == 1 && clobber(x)) { 4266 break 4267 } 4268 b = x.Block 4269 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4270 v.reset(OpCopy) 4271 v.AddArg(v0) 4272 v0.AuxInt = off 4273 v0.Aux = sym 4274 v0.AddArg(ptr) 4275 v0.AddArg(mem) 4276 return true 4277 } 4278 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 4279 // cond: x.Uses == 1 && clobber(x) 4280 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4281 for { 4282 x := v.Args[0] 4283 if x.Op != OpAMD64MOVQload { 4284 break 4285 } 4286 off := x.AuxInt 4287 sym := x.Aux 4288 _ = x.Args[1] 4289 ptr := x.Args[0] 4290 mem := x.Args[1] 4291 if !(x.Uses == 1 && clobber(x)) { 4292 break 4293 } 4294 b = x.Block 4295 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4296 v.reset(OpCopy) 4297 v.AddArg(v0) 4298 v0.AuxInt = off 4299 v0.Aux = sym 4300 v0.AddArg(ptr) 4301 v0.AddArg(mem) 4302 return true 4303 } 4304 // match: (MOVBQSX (ANDLconst [c] x)) 4305 // cond: c & 0x80 == 0 4306 // result: (ANDLconst [c & 0x7f] x) 4307 for { 4308 v_0 := v.Args[0] 4309 if v_0.Op != OpAMD64ANDLconst { 4310 break 4311 } 4312 c := v_0.AuxInt 4313 x := v_0.Args[0] 4314 if !(c&0x80 == 0) { 4315 break 4316 } 4317 v.reset(OpAMD64ANDLconst) 4318 v.AuxInt = c & 0x7f 4319 v.AddArg(x) 4320 return true 4321 } 4322 // match: (MOVBQSX (MOVBQSX x)) 4323 // cond: 4324 // result: (MOVBQSX x) 4325 for { 4326 v_0 := v.Args[0] 4327 if v_0.Op != OpAMD64MOVBQSX { 4328 break 
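// Editorial note (an illustrative sketch, not part of the generated rules;
// the function name is hypothetical). The (MOVBQSX (ANDLconst [c] x)) rule
// above fires only when c&0x80 == 0: a mask that clears bit 7 leaves a
// non-negative byte, so the sign extension becomes a no-op and the cheaper
// AND suffices. Sign-extending twice, as matched here, changes nothing:
//
//	func maskedByteIsNonNegative(x, c int32) bool {
//		masked := x & c // if c&0x80 == 0, bit 7 of the low byte is clear
//		return c&0x80 != 0 || int8(masked) >= 0 // always true
//	}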
4329 } 4330 x := v_0.Args[0] 4331 v.reset(OpAMD64MOVBQSX) 4332 v.AddArg(x) 4333 return true 4334 } 4335 return false 4336 } 4337 func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool { 4338 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 4339 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4340 // result: (MOVBQSX x) 4341 for { 4342 off := v.AuxInt 4343 sym := v.Aux 4344 _ = v.Args[1] 4345 ptr := v.Args[0] 4346 v_1 := v.Args[1] 4347 if v_1.Op != OpAMD64MOVBstore { 4348 break 4349 } 4350 off2 := v_1.AuxInt 4351 sym2 := v_1.Aux 4352 _ = v_1.Args[2] 4353 ptr2 := v_1.Args[0] 4354 x := v_1.Args[1] 4355 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4356 break 4357 } 4358 v.reset(OpAMD64MOVBQSX) 4359 v.AddArg(x) 4360 return true 4361 } 4362 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4363 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4364 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4365 for { 4366 off1 := v.AuxInt 4367 sym1 := v.Aux 4368 _ = v.Args[1] 4369 v_0 := v.Args[0] 4370 if v_0.Op != OpAMD64LEAQ { 4371 break 4372 } 4373 off2 := v_0.AuxInt 4374 sym2 := v_0.Aux 4375 base := v_0.Args[0] 4376 mem := v.Args[1] 4377 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4378 break 4379 } 4380 v.reset(OpAMD64MOVBQSXload) 4381 v.AuxInt = off1 + off2 4382 v.Aux = mergeSym(sym1, sym2) 4383 v.AddArg(base) 4384 v.AddArg(mem) 4385 return true 4386 } 4387 return false 4388 } 4389 func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { 4390 b := v.Block 4391 _ = b 4392 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 4393 // cond: x.Uses == 1 && clobber(x) 4394 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4395 for { 4396 x := v.Args[0] 4397 if x.Op != OpAMD64MOVBload { 4398 break 4399 } 4400 off := x.AuxInt 4401 sym := x.Aux 4402 _ = x.Args[1] 4403 ptr := x.Args[0] 4404 mem := x.Args[1] 4405 if !(x.Uses == 1 && clobber(x)) { 4406 break 4407 } 4408 b = x.Block 4409 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4410 v.reset(OpCopy) 4411 v.AddArg(v0) 4412 v0.AuxInt = off 4413 v0.Aux = sym 4414 v0.AddArg(ptr) 4415 v0.AddArg(mem) 4416 return true 4417 } 4418 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 4419 // cond: x.Uses == 1 && clobber(x) 4420 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4421 for { 4422 x := v.Args[0] 4423 if x.Op != OpAMD64MOVWload { 4424 break 4425 } 4426 off := x.AuxInt 4427 sym := x.Aux 4428 _ = x.Args[1] 4429 ptr := x.Args[0] 4430 mem := x.Args[1] 4431 if !(x.Uses == 1 && clobber(x)) { 4432 break 4433 } 4434 b = x.Block 4435 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4436 v.reset(OpCopy) 4437 v.AddArg(v0) 4438 v0.AuxInt = off 4439 v0.Aux = sym 4440 v0.AddArg(ptr) 4441 v0.AddArg(mem) 4442 return true 4443 } 4444 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 4445 // cond: x.Uses == 1 && clobber(x) 4446 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4447 for { 4448 x := v.Args[0] 4449 if x.Op != OpAMD64MOVLload { 4450 break 4451 } 4452 off := x.AuxInt 4453 sym := x.Aux 4454 _ = x.Args[1] 4455 ptr := x.Args[0] 4456 mem := x.Args[1] 4457 if !(x.Uses == 1 && clobber(x)) { 4458 break 4459 } 4460 b = x.Block 4461 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4462 v.reset(OpCopy) 4463 v.AddArg(v0) 4464 v0.AuxInt = off 4465 v0.Aux = sym 4466 v0.AddArg(ptr) 4467 v0.AddArg(mem) 4468 return true 4469 } 4470 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 4471 // cond: x.Uses == 1 && clobber(x) 4472 // 
result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4473 for { 4474 x := v.Args[0] 4475 if x.Op != OpAMD64MOVQload { 4476 break 4477 } 4478 off := x.AuxInt 4479 sym := x.Aux 4480 _ = x.Args[1] 4481 ptr := x.Args[0] 4482 mem := x.Args[1] 4483 if !(x.Uses == 1 && clobber(x)) { 4484 break 4485 } 4486 b = x.Block 4487 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4488 v.reset(OpCopy) 4489 v.AddArg(v0) 4490 v0.AuxInt = off 4491 v0.Aux = sym 4492 v0.AddArg(ptr) 4493 v0.AddArg(mem) 4494 return true 4495 } 4496 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 4497 // cond: x.Uses == 1 && clobber(x) 4498 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 4499 for { 4500 x := v.Args[0] 4501 if x.Op != OpAMD64MOVBloadidx1 { 4502 break 4503 } 4504 off := x.AuxInt 4505 sym := x.Aux 4506 _ = x.Args[2] 4507 ptr := x.Args[0] 4508 idx := x.Args[1] 4509 mem := x.Args[2] 4510 if !(x.Uses == 1 && clobber(x)) { 4511 break 4512 } 4513 b = x.Block 4514 v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type) 4515 v.reset(OpCopy) 4516 v.AddArg(v0) 4517 v0.AuxInt = off 4518 v0.Aux = sym 4519 v0.AddArg(ptr) 4520 v0.AddArg(idx) 4521 v0.AddArg(mem) 4522 return true 4523 } 4524 // match: (MOVBQZX (ANDLconst [c] x)) 4525 // cond: 4526 // result: (ANDLconst [c & 0xff] x) 4527 for { 4528 v_0 := v.Args[0] 4529 if v_0.Op != OpAMD64ANDLconst { 4530 break 4531 } 4532 c := v_0.AuxInt 4533 x := v_0.Args[0] 4534 v.reset(OpAMD64ANDLconst) 4535 v.AuxInt = c & 0xff 4536 v.AddArg(x) 4537 return true 4538 } 4539 // match: (MOVBQZX (MOVBQZX x)) 4540 // cond: 4541 // result: (MOVBQZX x) 4542 for { 4543 v_0 := v.Args[0] 4544 if v_0.Op != OpAMD64MOVBQZX { 4545 break 4546 } 4547 x := v_0.Args[0] 4548 v.reset(OpAMD64MOVBQZX) 4549 v.AddArg(x) 4550 return true 4551 } 4552 return false 4553 } 4554 func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { 4555 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 4556 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4557 // result: (MOVBQZX x) 4558 for { 4559 off := v.AuxInt 4560 sym := v.Aux 4561 _ = v.Args[1] 4562 ptr := v.Args[0] 4563 v_1 := v.Args[1] 4564 if v_1.Op != OpAMD64MOVBstore { 4565 break 4566 } 4567 off2 := v_1.AuxInt 4568 sym2 := v_1.Aux 4569 _ = v_1.Args[2] 4570 ptr2 := v_1.Args[0] 4571 x := v_1.Args[1] 4572 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4573 break 4574 } 4575 v.reset(OpAMD64MOVBQZX) 4576 v.AddArg(x) 4577 return true 4578 } 4579 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 4580 // cond: is32Bit(off1+off2) 4581 // result: (MOVBload [off1+off2] {sym} ptr mem) 4582 for { 4583 off1 := v.AuxInt 4584 sym := v.Aux 4585 _ = v.Args[1] 4586 v_0 := v.Args[0] 4587 if v_0.Op != OpAMD64ADDQconst { 4588 break 4589 } 4590 off2 := v_0.AuxInt 4591 ptr := v_0.Args[0] 4592 mem := v.Args[1] 4593 if !(is32Bit(off1 + off2)) { 4594 break 4595 } 4596 v.reset(OpAMD64MOVBload) 4597 v.AuxInt = off1 + off2 4598 v.Aux = sym 4599 v.AddArg(ptr) 4600 v.AddArg(mem) 4601 return true 4602 } 4603 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4604 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4605 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4606 for { 4607 off1 := v.AuxInt 4608 sym1 := v.Aux 4609 _ = v.Args[1] 4610 v_0 := v.Args[0] 4611 if v_0.Op != OpAMD64LEAQ { 4612 break 4613 } 4614 off2 := v_0.AuxInt 4615 sym2 := v_0.Aux 4616 base := v_0.Args[0] 4617 mem := v.Args[1] 4618 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4619 break 4620 } 4621 
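// Editorial note (an illustrative sketch, not part of the generated rules;
// the function name is hypothetical). The first MOVBload rule above is
// store-to-load forwarding: a byte load from the same address as the
// preceding byte store simply yields the stored value, zero-extended,
// which is why the result is (MOVBQZX x):
//
//	func storeForward(mem []byte, p int, x uint32) uint32 {
//		mem[p] = byte(x)      // the MOVBstore
//		return uint32(mem[p]) // the MOVBload: equals uint32(uint8(x))
//	}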
v.reset(OpAMD64MOVBload) 4622 v.AuxInt = off1 + off2 4623 v.Aux = mergeSym(sym1, sym2) 4624 v.AddArg(base) 4625 v.AddArg(mem) 4626 return true 4627 } 4628 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 4629 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4630 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 4631 for { 4632 off1 := v.AuxInt 4633 sym1 := v.Aux 4634 _ = v.Args[1] 4635 v_0 := v.Args[0] 4636 if v_0.Op != OpAMD64LEAQ1 { 4637 break 4638 } 4639 off2 := v_0.AuxInt 4640 sym2 := v_0.Aux 4641 _ = v_0.Args[1] 4642 ptr := v_0.Args[0] 4643 idx := v_0.Args[1] 4644 mem := v.Args[1] 4645 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4646 break 4647 } 4648 v.reset(OpAMD64MOVBloadidx1) 4649 v.AuxInt = off1 + off2 4650 v.Aux = mergeSym(sym1, sym2) 4651 v.AddArg(ptr) 4652 v.AddArg(idx) 4653 v.AddArg(mem) 4654 return true 4655 } 4656 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 4657 // cond: ptr.Op != OpSB 4658 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 4659 for { 4660 off := v.AuxInt 4661 sym := v.Aux 4662 _ = v.Args[1] 4663 v_0 := v.Args[0] 4664 if v_0.Op != OpAMD64ADDQ { 4665 break 4666 } 4667 _ = v_0.Args[1] 4668 ptr := v_0.Args[0] 4669 idx := v_0.Args[1] 4670 mem := v.Args[1] 4671 if !(ptr.Op != OpSB) { 4672 break 4673 } 4674 v.reset(OpAMD64MOVBloadidx1) 4675 v.AuxInt = off 4676 v.Aux = sym 4677 v.AddArg(ptr) 4678 v.AddArg(idx) 4679 v.AddArg(mem) 4680 return true 4681 } 4682 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 4683 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 4684 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4685 for { 4686 off1 := v.AuxInt 4687 sym1 := v.Aux 4688 _ = v.Args[1] 4689 v_0 := v.Args[0] 4690 if v_0.Op != OpAMD64LEAL { 4691 break 4692 } 4693 off2 := v_0.AuxInt 4694 sym2 := v_0.Aux 4695 base := v_0.Args[0] 4696 mem := v.Args[1] 4697 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 4698 break 4699 } 4700 v.reset(OpAMD64MOVBload) 4701 v.AuxInt = off1 + off2 4702 v.Aux = mergeSym(sym1, sym2) 4703 v.AddArg(base) 4704 v.AddArg(mem) 4705 return true 4706 } 4707 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 4708 // cond: is32Bit(off1+off2) 4709 // result: (MOVBload [off1+off2] {sym} ptr mem) 4710 for { 4711 off1 := v.AuxInt 4712 sym := v.Aux 4713 _ = v.Args[1] 4714 v_0 := v.Args[0] 4715 if v_0.Op != OpAMD64ADDLconst { 4716 break 4717 } 4718 off2 := v_0.AuxInt 4719 ptr := v_0.Args[0] 4720 mem := v.Args[1] 4721 if !(is32Bit(off1 + off2)) { 4722 break 4723 } 4724 v.reset(OpAMD64MOVBload) 4725 v.AuxInt = off1 + off2 4726 v.Aux = sym 4727 v.AddArg(ptr) 4728 v.AddArg(mem) 4729 return true 4730 } 4731 return false 4732 } 4733 func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { 4734 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 4735 // cond: is32Bit(c+d) 4736 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 4737 for { 4738 c := v.AuxInt 4739 sym := v.Aux 4740 _ = v.Args[2] 4741 v_0 := v.Args[0] 4742 if v_0.Op != OpAMD64ADDQconst { 4743 break 4744 } 4745 d := v_0.AuxInt 4746 ptr := v_0.Args[0] 4747 idx := v.Args[1] 4748 mem := v.Args[2] 4749 if !(is32Bit(c + d)) { 4750 break 4751 } 4752 v.reset(OpAMD64MOVBloadidx1) 4753 v.AuxInt = c + d 4754 v.Aux = sym 4755 v.AddArg(ptr) 4756 v.AddArg(idx) 4757 v.AddArg(mem) 4758 return true 4759 } 4760 // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 4761 // cond: is32Bit(c+d) 4762 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 4763 for { 4764 c := v.AuxInt 4765 sym 
:= v.Aux 4766 _ = v.Args[2] 4767 idx := v.Args[0] 4768 v_1 := v.Args[1] 4769 if v_1.Op != OpAMD64ADDQconst { 4770 break 4771 } 4772 d := v_1.AuxInt 4773 ptr := v_1.Args[0] 4774 mem := v.Args[2] 4775 if !(is32Bit(c + d)) { 4776 break 4777 } 4778 v.reset(OpAMD64MOVBloadidx1) 4779 v.AuxInt = c + d 4780 v.Aux = sym 4781 v.AddArg(ptr) 4782 v.AddArg(idx) 4783 v.AddArg(mem) 4784 return true 4785 } 4786 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 4787 // cond: is32Bit(c+d) 4788 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 4789 for { 4790 c := v.AuxInt 4791 sym := v.Aux 4792 _ = v.Args[2] 4793 ptr := v.Args[0] 4794 v_1 := v.Args[1] 4795 if v_1.Op != OpAMD64ADDQconst { 4796 break 4797 } 4798 d := v_1.AuxInt 4799 idx := v_1.Args[0] 4800 mem := v.Args[2] 4801 if !(is32Bit(c + d)) { 4802 break 4803 } 4804 v.reset(OpAMD64MOVBloadidx1) 4805 v.AuxInt = c + d 4806 v.Aux = sym 4807 v.AddArg(ptr) 4808 v.AddArg(idx) 4809 v.AddArg(mem) 4810 return true 4811 } 4812 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 4813 // cond: is32Bit(c+d) 4814 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 4815 for { 4816 c := v.AuxInt 4817 sym := v.Aux 4818 _ = v.Args[2] 4819 v_0 := v.Args[0] 4820 if v_0.Op != OpAMD64ADDQconst { 4821 break 4822 } 4823 d := v_0.AuxInt 4824 idx := v_0.Args[0] 4825 ptr := v.Args[1] 4826 mem := v.Args[2] 4827 if !(is32Bit(c + d)) { 4828 break 4829 } 4830 v.reset(OpAMD64MOVBloadidx1) 4831 v.AuxInt = c + d 4832 v.Aux = sym 4833 v.AddArg(ptr) 4834 v.AddArg(idx) 4835 v.AddArg(mem) 4836 return true 4837 } 4838 return false 4839 } 4840 func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { 4841 b := v.Block 4842 _ = b 4843 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 4844 // cond: 4845 // result: (MOVBstore [off] {sym} ptr x mem) 4846 for { 4847 off := v.AuxInt 4848 sym := v.Aux 4849 _ = v.Args[2] 4850 ptr := v.Args[0] 4851 v_1 := v.Args[1] 4852 if v_1.Op != OpAMD64MOVBQSX { 4853 break 4854 } 4855 x := v_1.Args[0] 4856 mem := v.Args[2] 4857 v.reset(OpAMD64MOVBstore) 4858 v.AuxInt = off 4859 v.Aux = sym 4860 v.AddArg(ptr) 4861 v.AddArg(x) 4862 v.AddArg(mem) 4863 return true 4864 } 4865 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 4866 // cond: 4867 // result: (MOVBstore [off] {sym} ptr x mem) 4868 for { 4869 off := v.AuxInt 4870 sym := v.Aux 4871 _ = v.Args[2] 4872 ptr := v.Args[0] 4873 v_1 := v.Args[1] 4874 if v_1.Op != OpAMD64MOVBQZX { 4875 break 4876 } 4877 x := v_1.Args[0] 4878 mem := v.Args[2] 4879 v.reset(OpAMD64MOVBstore) 4880 v.AuxInt = off 4881 v.Aux = sym 4882 v.AddArg(ptr) 4883 v.AddArg(x) 4884 v.AddArg(mem) 4885 return true 4886 } 4887 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 4888 // cond: is32Bit(off1+off2) 4889 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 4890 for { 4891 off1 := v.AuxInt 4892 sym := v.Aux 4893 _ = v.Args[2] 4894 v_0 := v.Args[0] 4895 if v_0.Op != OpAMD64ADDQconst { 4896 break 4897 } 4898 off2 := v_0.AuxInt 4899 ptr := v_0.Args[0] 4900 val := v.Args[1] 4901 mem := v.Args[2] 4902 if !(is32Bit(off1 + off2)) { 4903 break 4904 } 4905 v.reset(OpAMD64MOVBstore) 4906 v.AuxInt = off1 + off2 4907 v.Aux = sym 4908 v.AddArg(ptr) 4909 v.AddArg(val) 4910 v.AddArg(mem) 4911 return true 4912 } 4913 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 4914 // cond: validOff(off) 4915 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 4916 for { 4917 off := v.AuxInt 4918 sym := v.Aux 4919 _ = v.Args[2] 4920 ptr := v.Args[0] 4921 v_1 := v.Args[1] 4922 if v_1.Op != 
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(int64(int8(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x0 := v.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst {
			break
		}
		if x0_1.AuxInt != 8 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x2 := v.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst {
			break
		}
		if x2_1.AuxInt != 8 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst {
			break
		}
		if x1_1.AuxInt != 16 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst {
			break
		}
		if x0_1.AuxInt != 24 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x6 := v.Args[2]
		if x6.Op != OpAMD64MOVBstore {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[2]
		if p != x6.Args[0] {
			break
		}
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst {
			break
		}
		if x6_1.AuxInt != 8 {
			break
		}
		if w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[2]
		if p != x5.Args[0] {
			break
		}
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst {
			break
		}
		if x5_1.AuxInt != 16 {
			break
		}
		if w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[2]
		if p != x4.Args[0] {
			break
		}
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst {
			break
		}
		if x4_1.AuxInt != 24 {
			break
		}
		if w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[2]
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst {
			break
		}
		if x3_1.AuxInt != 32 {
			break
		}
		if w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst {
			break
		}
		if x2_1.AuxInt != 40 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst {
			break
		}
		if x1_1.AuxInt != 48 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst {
			break
		}
		if x0_1.AuxInt != 56 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 8 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x0 := v.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRWconst {
			break
		}
		if x0_2.AuxInt != 8 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x2 := v.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRLconst {
			break
		}
		if x2_2.AuxInt != 8 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRLconst {
			break
		}
		if x1_2.AuxInt != 16 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRLconst {
			break
		}
		if x0_2.AuxInt != 24 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x6 := v.Args[3]
		if x6.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[3]
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		x6_2 := x6.Args[2]
		if x6_2.Op != OpAMD64SHRQconst {
			break
		}
		if x6_2.AuxInt != 8 {
			break
		}
		if w != x6_2.Args[0] {
			break
		}
		x5 := x6.Args[3]
		if x5.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[3]
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		x5_2 := x5.Args[2]
		if x5_2.Op != OpAMD64SHRQconst {
			break
		}
		if x5_2.AuxInt != 16 {
			break
		}
		if w != x5_2.Args[0] {
			break
		}
		x4 := x5.Args[3]
		if x4.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[3]
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		x4_2 := x4.Args[2]
		if x4_2.Op != OpAMD64SHRQconst {
			break
		}
		if x4_2.AuxInt != 24 {
			break
		}
		if w != x4_2.Args[0] {
			break
		}
		x3 := x4.Args[3]
		if x3.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[3]
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		x3_2 := x3.Args[2]
		if x3_2.Op != OpAMD64SHRQconst {
			break
		}
		if x3_2.AuxInt != 32 {
			break
		}
		if w != x3_2.Args[0] {
			break
		}
		x2 := x3.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRQconst {
			break
		}
		if x2_2.AuxInt != 40 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRQconst {
			break
		}
		if x1_2.AuxInt != 48 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRQconst {
			break
		}
		if x0_2.AuxInt != 56 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 8 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: c & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fffffff
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// cond:
	// result: (MOVLQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool {
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx4 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// cond:
	// result: (MOVLQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// cond:
	// result: (MOVWQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = ValAndOff(sc).add(off) 7538 v.Aux = mergeSym(sym1, sym2) 7539 v.AddArg(ptr) 7540 v.AddArg(mem) 7541 return true 7542 } 7543 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 7544 // cond: canMergeSym(sym1, sym2) 7545 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7546 for { 7547 x := v.AuxInt 7548 sym1 := v.Aux 7549 _ = v.Args[1] 7550 v_0 := v.Args[0] 7551 if v_0.Op != OpAMD64LEAQ1 { 7552 break 7553 } 7554 off := v_0.AuxInt 7555 sym2 := v_0.Aux 7556 _ = v_0.Args[1] 7557 ptr := v_0.Args[0] 7558 idx := v_0.Args[1] 7559 mem := v.Args[1] 7560 if !(canMergeSym(sym1, sym2)) { 7561 break 7562 } 7563 v.reset(OpAMD64MOVLstoreconstidx1) 7564 v.AuxInt = ValAndOff(x).add(off) 7565 v.Aux = mergeSym(sym1, sym2) 7566 v.AddArg(ptr) 7567 v.AddArg(idx) 7568 v.AddArg(mem) 7569 return true 7570 } 7571 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 7572 // cond: canMergeSym(sym1, sym2) 7573 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7574 for { 7575 x := v.AuxInt 7576 sym1 := v.Aux 7577 _ = v.Args[1] 7578 v_0 := v.Args[0] 7579 if v_0.Op != OpAMD64LEAQ4 { 7580 break 7581 } 7582 off := v_0.AuxInt 7583 sym2 := v_0.Aux 7584 _ = v_0.Args[1] 7585 ptr := v_0.Args[0] 7586 idx := v_0.Args[1] 7587 mem := v.Args[1] 7588 if !(canMergeSym(sym1, sym2)) { 7589 break 7590 } 7591 v.reset(OpAMD64MOVLstoreconstidx4) 7592 v.AuxInt = ValAndOff(x).add(off) 7593 v.Aux = mergeSym(sym1, sym2) 7594 v.AddArg(ptr) 7595 v.AddArg(idx) 7596 v.AddArg(mem) 7597 return true 7598 } 7599 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 7600 // cond: 7601 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 7602 for { 7603 x := v.AuxInt 7604 sym := v.Aux 7605 _ = v.Args[1] 7606 v_0 := v.Args[0] 7607 if v_0.Op != OpAMD64ADDQ { 7608 break 7609 } 7610 _ = v_0.Args[1] 7611 ptr := v_0.Args[0] 7612 idx := v_0.Args[1] 7613 mem := v.Args[1] 7614 v.reset(OpAMD64MOVLstoreconstidx1) 7615 v.AuxInt = x 7616 v.Aux = sym 7617 v.AddArg(ptr) 7618 v.AddArg(idx) 7619 v.AddArg(mem) 7620 return true 7621 } 7622 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 7623 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 7624 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 7625 for { 7626 c := v.AuxInt 7627 s := v.Aux 7628 _ = v.Args[1] 7629 p := v.Args[0] 7630 x := v.Args[1] 7631 if x.Op != OpAMD64MOVLstoreconst { 7632 break 7633 } 7634 a := x.AuxInt 7635 if x.Aux != s { 7636 break 7637 } 7638 _ = x.Args[1] 7639 if p != x.Args[0] { 7640 break 7641 } 7642 mem := x.Args[1] 7643 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 7644 break 7645 } 7646 v.reset(OpAMD64MOVQstore) 7647 v.AuxInt = ValAndOff(a).Off() 7648 v.Aux = s 7649 v.AddArg(p) 7650 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 7651 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 7652 v.AddArg(v0) 7653 v.AddArg(mem) 7654 return true 7655 } 7656 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 7657 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7658 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7659 for { 7660 sc := v.AuxInt 7661 sym1 := v.Aux 7662 _ = v.Args[1] 7663 v_0 := v.Args[0] 7664 if v_0.Op != OpAMD64LEAL { 7665 break 7666 } 7667 off := v_0.AuxInt 7668 sym2 := v_0.Aux 7669 ptr := v_0.Args[0] 7670 mem := 
v.Args[1] 7671 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7672 break 7673 } 7674 v.reset(OpAMD64MOVLstoreconst) 7675 v.AuxInt = ValAndOff(sc).add(off) 7676 v.Aux = mergeSym(sym1, sym2) 7677 v.AddArg(ptr) 7678 v.AddArg(mem) 7679 return true 7680 } 7681 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 7682 // cond: ValAndOff(sc).canAdd(off) 7683 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7684 for { 7685 sc := v.AuxInt 7686 s := v.Aux 7687 _ = v.Args[1] 7688 v_0 := v.Args[0] 7689 if v_0.Op != OpAMD64ADDLconst { 7690 break 7691 } 7692 off := v_0.AuxInt 7693 ptr := v_0.Args[0] 7694 mem := v.Args[1] 7695 if !(ValAndOff(sc).canAdd(off)) { 7696 break 7697 } 7698 v.reset(OpAMD64MOVLstoreconst) 7699 v.AuxInt = ValAndOff(sc).add(off) 7700 v.Aux = s 7701 v.AddArg(ptr) 7702 v.AddArg(mem) 7703 return true 7704 } 7705 return false 7706 } 7707 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { 7708 b := v.Block 7709 _ = b 7710 typ := &b.Func.Config.Types 7711 _ = typ 7712 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7713 // cond: 7714 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 7715 for { 7716 c := v.AuxInt 7717 sym := v.Aux 7718 _ = v.Args[2] 7719 ptr := v.Args[0] 7720 v_1 := v.Args[1] 7721 if v_1.Op != OpAMD64SHLQconst { 7722 break 7723 } 7724 if v_1.AuxInt != 2 { 7725 break 7726 } 7727 idx := v_1.Args[0] 7728 mem := v.Args[2] 7729 v.reset(OpAMD64MOVLstoreconstidx4) 7730 v.AuxInt = c 7731 v.Aux = sym 7732 v.AddArg(ptr) 7733 v.AddArg(idx) 7734 v.AddArg(mem) 7735 return true 7736 } 7737 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 7738 // cond: ValAndOff(x).canAdd(c) 7739 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7740 for { 7741 x := v.AuxInt 7742 sym := v.Aux 7743 _ = v.Args[2] 7744 v_0 := v.Args[0] 7745 if v_0.Op != OpAMD64ADDQconst { 7746 break 7747 } 7748 c := v_0.AuxInt 7749 ptr := v_0.Args[0] 7750 idx := v.Args[1] 7751 mem := v.Args[2] 7752 if !(ValAndOff(x).canAdd(c)) { 7753 break 7754 } 7755 v.reset(OpAMD64MOVLstoreconstidx1) 7756 v.AuxInt = ValAndOff(x).add(c) 7757 v.Aux = sym 7758 v.AddArg(ptr) 7759 v.AddArg(idx) 7760 v.AddArg(mem) 7761 return true 7762 } 7763 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 7764 // cond: ValAndOff(x).canAdd(c) 7765 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7766 for { 7767 x := v.AuxInt 7768 sym := v.Aux 7769 _ = v.Args[2] 7770 ptr := v.Args[0] 7771 v_1 := v.Args[1] 7772 if v_1.Op != OpAMD64ADDQconst { 7773 break 7774 } 7775 c := v_1.AuxInt 7776 idx := v_1.Args[0] 7777 mem := v.Args[2] 7778 if !(ValAndOff(x).canAdd(c)) { 7779 break 7780 } 7781 v.reset(OpAMD64MOVLstoreconstidx1) 7782 v.AuxInt = ValAndOff(x).add(c) 7783 v.Aux = sym 7784 v.AddArg(ptr) 7785 v.AddArg(idx) 7786 v.AddArg(mem) 7787 return true 7788 } 7789 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 7790 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 7791 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 7792 for { 7793 c := v.AuxInt 7794 s := v.Aux 7795 _ = v.Args[2] 7796 p := v.Args[0] 7797 i := v.Args[1] 7798 x := v.Args[2] 7799 if x.Op != OpAMD64MOVLstoreconstidx1 { 7800 break 7801 } 7802 a := x.AuxInt 7803 if x.Aux != s { 7804 break 7805 } 7806 _ = x.Args[2] 7807 if p != x.Args[0] { 7808 break 7809 } 7810 if i != x.Args[1] { 7811 break 7812 } 
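// Illustrative check (mirrors the result expression built below): two
// adjacent 4-byte constant stores fuse little-endian into one 8-byte
// immediate:
//
//	lo := ValAndOff(a).Val() & 0xffffffff // written at Off(a)
//	hi := ValAndOff(c).Val()              // written at Off(a)+4
//	q := lo | hi<<32                      // payload of the new MOVQconst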
7813 mem := x.Args[2] 7814 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 7815 break 7816 } 7817 v.reset(OpAMD64MOVQstoreidx1) 7818 v.AuxInt = ValAndOff(a).Off() 7819 v.Aux = s 7820 v.AddArg(p) 7821 v.AddArg(i) 7822 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 7823 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 7824 v.AddArg(v0) 7825 v.AddArg(mem) 7826 return true 7827 } 7828 return false 7829 } 7830 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { 7831 b := v.Block 7832 _ = b 7833 typ := &b.Func.Config.Types 7834 _ = typ 7835 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 7836 // cond: ValAndOff(x).canAdd(c) 7837 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7838 for { 7839 x := v.AuxInt 7840 sym := v.Aux 7841 _ = v.Args[2] 7842 v_0 := v.Args[0] 7843 if v_0.Op != OpAMD64ADDQconst { 7844 break 7845 } 7846 c := v_0.AuxInt 7847 ptr := v_0.Args[0] 7848 idx := v.Args[1] 7849 mem := v.Args[2] 7850 if !(ValAndOff(x).canAdd(c)) { 7851 break 7852 } 7853 v.reset(OpAMD64MOVLstoreconstidx4) 7854 v.AuxInt = ValAndOff(x).add(c) 7855 v.Aux = sym 7856 v.AddArg(ptr) 7857 v.AddArg(idx) 7858 v.AddArg(mem) 7859 return true 7860 } 7861 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 7862 // cond: ValAndOff(x).canAdd(4*c) 7863 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 7864 for { 7865 x := v.AuxInt 7866 sym := v.Aux 7867 _ = v.Args[2] 7868 ptr := v.Args[0] 7869 v_1 := v.Args[1] 7870 if v_1.Op != OpAMD64ADDQconst { 7871 break 7872 } 7873 c := v_1.AuxInt 7874 idx := v_1.Args[0] 7875 mem := v.Args[2] 7876 if !(ValAndOff(x).canAdd(4 * c)) { 7877 break 7878 } 7879 v.reset(OpAMD64MOVLstoreconstidx4) 7880 v.AuxInt = ValAndOff(x).add(4 * c) 7881 v.Aux = sym 7882 v.AddArg(ptr) 7883 v.AddArg(idx) 7884 v.AddArg(mem) 7885 return true 7886 } 7887 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 7888 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 7889 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 7890 for { 7891 c := v.AuxInt 7892 s := v.Aux 7893 _ = v.Args[2] 7894 p := v.Args[0] 7895 i := v.Args[1] 7896 x := v.Args[2] 7897 if x.Op != OpAMD64MOVLstoreconstidx4 { 7898 break 7899 } 7900 a := x.AuxInt 7901 if x.Aux != s { 7902 break 7903 } 7904 _ = x.Args[2] 7905 if p != x.Args[0] { 7906 break 7907 } 7908 if i != x.Args[1] { 7909 break 7910 } 7911 mem := x.Args[2] 7912 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 7913 break 7914 } 7915 v.reset(OpAMD64MOVQstoreidx1) 7916 v.AuxInt = ValAndOff(a).Off() 7917 v.Aux = s 7918 v.AddArg(p) 7919 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 7920 v0.AuxInt = 2 7921 v0.AddArg(i) 7922 v.AddArg(v0) 7923 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 7924 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 7925 v.AddArg(v1) 7926 v.AddArg(mem) 7927 return true 7928 } 7929 return false 7930 } 7931 func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { 7932 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 7933 // cond: 7934 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 7935 for { 7936 c := v.AuxInt 7937 sym := v.Aux 7938 _ = v.Args[3] 7939 ptr := v.Args[0] 7940 v_1 := v.Args[1] 7941 if v_1.Op != OpAMD64SHLQconst { 7942 break 7943 } 7944 if 
v_1.AuxInt != 2 { 7945 break 7946 } 7947 idx := v_1.Args[0] 7948 val := v.Args[2] 7949 mem := v.Args[3] 7950 v.reset(OpAMD64MOVLstoreidx4) 7951 v.AuxInt = c 7952 v.Aux = sym 7953 v.AddArg(ptr) 7954 v.AddArg(idx) 7955 v.AddArg(val) 7956 v.AddArg(mem) 7957 return true 7958 } 7959 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7960 // cond: is32Bit(c+d) 7961 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 7962 for { 7963 c := v.AuxInt 7964 sym := v.Aux 7965 _ = v.Args[3] 7966 v_0 := v.Args[0] 7967 if v_0.Op != OpAMD64ADDQconst { 7968 break 7969 } 7970 d := v_0.AuxInt 7971 ptr := v_0.Args[0] 7972 idx := v.Args[1] 7973 val := v.Args[2] 7974 mem := v.Args[3] 7975 if !(is32Bit(c + d)) { 7976 break 7977 } 7978 v.reset(OpAMD64MOVLstoreidx1) 7979 v.AuxInt = c + d 7980 v.Aux = sym 7981 v.AddArg(ptr) 7982 v.AddArg(idx) 7983 v.AddArg(val) 7984 v.AddArg(mem) 7985 return true 7986 } 7987 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7988 // cond: is32Bit(c+d) 7989 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 7990 for { 7991 c := v.AuxInt 7992 sym := v.Aux 7993 _ = v.Args[3] 7994 ptr := v.Args[0] 7995 v_1 := v.Args[1] 7996 if v_1.Op != OpAMD64ADDQconst { 7997 break 7998 } 7999 d := v_1.AuxInt 8000 idx := v_1.Args[0] 8001 val := v.Args[2] 8002 mem := v.Args[3] 8003 if !(is32Bit(c + d)) { 8004 break 8005 } 8006 v.reset(OpAMD64MOVLstoreidx1) 8007 v.AuxInt = c + d 8008 v.Aux = sym 8009 v.AddArg(ptr) 8010 v.AddArg(idx) 8011 v.AddArg(val) 8012 v.AddArg(mem) 8013 return true 8014 } 8015 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 8016 // cond: x.Uses == 1 && clobber(x) 8017 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 8018 for { 8019 i := v.AuxInt 8020 s := v.Aux 8021 _ = v.Args[3] 8022 p := v.Args[0] 8023 idx := v.Args[1] 8024 v_2 := v.Args[2] 8025 if v_2.Op != OpAMD64SHRQconst { 8026 break 8027 } 8028 if v_2.AuxInt != 32 { 8029 break 8030 } 8031 w := v_2.Args[0] 8032 x := v.Args[3] 8033 if x.Op != OpAMD64MOVLstoreidx1 { 8034 break 8035 } 8036 if x.AuxInt != i-4 { 8037 break 8038 } 8039 if x.Aux != s { 8040 break 8041 } 8042 _ = x.Args[3] 8043 if p != x.Args[0] { 8044 break 8045 } 8046 if idx != x.Args[1] { 8047 break 8048 } 8049 if w != x.Args[2] { 8050 break 8051 } 8052 mem := x.Args[3] 8053 if !(x.Uses == 1 && clobber(x)) { 8054 break 8055 } 8056 v.reset(OpAMD64MOVQstoreidx1) 8057 v.AuxInt = i - 4 8058 v.Aux = s 8059 v.AddArg(p) 8060 v.AddArg(idx) 8061 v.AddArg(w) 8062 v.AddArg(mem) 8063 return true 8064 } 8065 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 8066 // cond: x.Uses == 1 && clobber(x) 8067 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 8068 for { 8069 i := v.AuxInt 8070 s := v.Aux 8071 _ = v.Args[3] 8072 p := v.Args[0] 8073 idx := v.Args[1] 8074 v_2 := v.Args[2] 8075 if v_2.Op != OpAMD64SHRQconst { 8076 break 8077 } 8078 j := v_2.AuxInt 8079 w := v_2.Args[0] 8080 x := v.Args[3] 8081 if x.Op != OpAMD64MOVLstoreidx1 { 8082 break 8083 } 8084 if x.AuxInt != i-4 { 8085 break 8086 } 8087 if x.Aux != s { 8088 break 8089 } 8090 _ = x.Args[3] 8091 if p != x.Args[0] { 8092 break 8093 } 8094 if idx != x.Args[1] { 8095 break 8096 } 8097 w0 := x.Args[2] 8098 if w0.Op != OpAMD64SHRQconst { 8099 break 8100 } 8101 if w0.AuxInt != j-32 { 8102 break 8103 } 8104 if w != w0.Args[0] { 8105 break 8106 } 8107 mem := x.Args[3] 8108 if !(x.Uses == 1 && clobber(x)) { 8109 break 8110 } 8111 v.reset(OpAMD64MOVQstoreidx1) 8112 
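// Note (illustrative): in this rule w0 = w>>(j-32) supplied the low half at
// i-4 while w>>j supplied the low half at i; since SHRQ shifts are logical,
// w0>>32 == w>>j, so the single 8-byte store of w0 at i-4 built below
// reproduces both 4-byte stores.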
v.AuxInt = i - 4 8113 v.Aux = s 8114 v.AddArg(p) 8115 v.AddArg(idx) 8116 v.AddArg(w0) 8117 v.AddArg(mem) 8118 return true 8119 } 8120 return false 8121 } 8122 func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { 8123 b := v.Block 8124 _ = b 8125 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8126 // cond: is32Bit(c+d) 8127 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 8128 for { 8129 c := v.AuxInt 8130 sym := v.Aux 8131 _ = v.Args[3] 8132 v_0 := v.Args[0] 8133 if v_0.Op != OpAMD64ADDQconst { 8134 break 8135 } 8136 d := v_0.AuxInt 8137 ptr := v_0.Args[0] 8138 idx := v.Args[1] 8139 val := v.Args[2] 8140 mem := v.Args[3] 8141 if !(is32Bit(c + d)) { 8142 break 8143 } 8144 v.reset(OpAMD64MOVLstoreidx4) 8145 v.AuxInt = c + d 8146 v.Aux = sym 8147 v.AddArg(ptr) 8148 v.AddArg(idx) 8149 v.AddArg(val) 8150 v.AddArg(mem) 8151 return true 8152 } 8153 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8154 // cond: is32Bit(c+4*d) 8155 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 8156 for { 8157 c := v.AuxInt 8158 sym := v.Aux 8159 _ = v.Args[3] 8160 ptr := v.Args[0] 8161 v_1 := v.Args[1] 8162 if v_1.Op != OpAMD64ADDQconst { 8163 break 8164 } 8165 d := v_1.AuxInt 8166 idx := v_1.Args[0] 8167 val := v.Args[2] 8168 mem := v.Args[3] 8169 if !(is32Bit(c + 4*d)) { 8170 break 8171 } 8172 v.reset(OpAMD64MOVLstoreidx4) 8173 v.AuxInt = c + 4*d 8174 v.Aux = sym 8175 v.AddArg(ptr) 8176 v.AddArg(idx) 8177 v.AddArg(val) 8178 v.AddArg(mem) 8179 return true 8180 } 8181 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 8182 // cond: x.Uses == 1 && clobber(x) 8183 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 8184 for { 8185 i := v.AuxInt 8186 s := v.Aux 8187 _ = v.Args[3] 8188 p := v.Args[0] 8189 idx := v.Args[1] 8190 v_2 := v.Args[2] 8191 if v_2.Op != OpAMD64SHRQconst { 8192 break 8193 } 8194 if v_2.AuxInt != 32 { 8195 break 8196 } 8197 w := v_2.Args[0] 8198 x := v.Args[3] 8199 if x.Op != OpAMD64MOVLstoreidx4 { 8200 break 8201 } 8202 if x.AuxInt != i-4 { 8203 break 8204 } 8205 if x.Aux != s { 8206 break 8207 } 8208 _ = x.Args[3] 8209 if p != x.Args[0] { 8210 break 8211 } 8212 if idx != x.Args[1] { 8213 break 8214 } 8215 if w != x.Args[2] { 8216 break 8217 } 8218 mem := x.Args[3] 8219 if !(x.Uses == 1 && clobber(x)) { 8220 break 8221 } 8222 v.reset(OpAMD64MOVQstoreidx1) 8223 v.AuxInt = i - 4 8224 v.Aux = s 8225 v.AddArg(p) 8226 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 8227 v0.AuxInt = 2 8228 v0.AddArg(idx) 8229 v.AddArg(v0) 8230 v.AddArg(w) 8231 v.AddArg(mem) 8232 return true 8233 } 8234 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 8235 // cond: x.Uses == 1 && clobber(x) 8236 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 8237 for { 8238 i := v.AuxInt 8239 s := v.Aux 8240 _ = v.Args[3] 8241 p := v.Args[0] 8242 idx := v.Args[1] 8243 v_2 := v.Args[2] 8244 if v_2.Op != OpAMD64SHRQconst { 8245 break 8246 } 8247 j := v_2.AuxInt 8248 w := v_2.Args[0] 8249 x := v.Args[3] 8250 if x.Op != OpAMD64MOVLstoreidx4 { 8251 break 8252 } 8253 if x.AuxInt != i-4 { 8254 break 8255 } 8256 if x.Aux != s { 8257 break 8258 } 8259 _ = x.Args[3] 8260 if p != x.Args[0] { 8261 break 8262 } 8263 if idx != x.Args[1] { 8264 break 8265 } 8266 w0 := x.Args[2] 8267 if w0.Op != OpAMD64SHRQconst { 8268 break 8269 } 8270 if w0.AuxInt != j-32 { 8271 break 8272 } 8273 if w != w0.Args[0] { 8274 
break 8275 } 8276 mem := x.Args[3] 8277 if !(x.Uses == 1 && clobber(x)) { 8278 break 8279 } 8280 v.reset(OpAMD64MOVQstoreidx1) 8281 v.AuxInt = i - 4 8282 v.Aux = s 8283 v.AddArg(p) 8284 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 8285 v0.AuxInt = 2 8286 v0.AddArg(idx) 8287 v.AddArg(v0) 8288 v.AddArg(w0) 8289 v.AddArg(mem) 8290 return true 8291 } 8292 return false 8293 } 8294 func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { 8295 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 8296 // cond: is32Bit(off1+off2) 8297 // result: (MOVOload [off1+off2] {sym} ptr mem) 8298 for { 8299 off1 := v.AuxInt 8300 sym := v.Aux 8301 _ = v.Args[1] 8302 v_0 := v.Args[0] 8303 if v_0.Op != OpAMD64ADDQconst { 8304 break 8305 } 8306 off2 := v_0.AuxInt 8307 ptr := v_0.Args[0] 8308 mem := v.Args[1] 8309 if !(is32Bit(off1 + off2)) { 8310 break 8311 } 8312 v.reset(OpAMD64MOVOload) 8313 v.AuxInt = off1 + off2 8314 v.Aux = sym 8315 v.AddArg(ptr) 8316 v.AddArg(mem) 8317 return true 8318 } 8319 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8320 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8321 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8322 for { 8323 off1 := v.AuxInt 8324 sym1 := v.Aux 8325 _ = v.Args[1] 8326 v_0 := v.Args[0] 8327 if v_0.Op != OpAMD64LEAQ { 8328 break 8329 } 8330 off2 := v_0.AuxInt 8331 sym2 := v_0.Aux 8332 base := v_0.Args[0] 8333 mem := v.Args[1] 8334 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8335 break 8336 } 8337 v.reset(OpAMD64MOVOload) 8338 v.AuxInt = off1 + off2 8339 v.Aux = mergeSym(sym1, sym2) 8340 v.AddArg(base) 8341 v.AddArg(mem) 8342 return true 8343 } 8344 return false 8345 } 8346 func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { 8347 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8348 // cond: is32Bit(off1+off2) 8349 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 8350 for { 8351 off1 := v.AuxInt 8352 sym := v.Aux 8353 _ = v.Args[2] 8354 v_0 := v.Args[0] 8355 if v_0.Op != OpAMD64ADDQconst { 8356 break 8357 } 8358 off2 := v_0.AuxInt 8359 ptr := v_0.Args[0] 8360 val := v.Args[1] 8361 mem := v.Args[2] 8362 if !(is32Bit(off1 + off2)) { 8363 break 8364 } 8365 v.reset(OpAMD64MOVOstore) 8366 v.AuxInt = off1 + off2 8367 v.Aux = sym 8368 v.AddArg(ptr) 8369 v.AddArg(val) 8370 v.AddArg(mem) 8371 return true 8372 } 8373 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8374 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8375 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8376 for { 8377 off1 := v.AuxInt 8378 sym1 := v.Aux 8379 _ = v.Args[2] 8380 v_0 := v.Args[0] 8381 if v_0.Op != OpAMD64LEAQ { 8382 break 8383 } 8384 off2 := v_0.AuxInt 8385 sym2 := v_0.Aux 8386 base := v_0.Args[0] 8387 val := v.Args[1] 8388 mem := v.Args[2] 8389 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8390 break 8391 } 8392 v.reset(OpAMD64MOVOstore) 8393 v.AuxInt = off1 + off2 8394 v.Aux = mergeSym(sym1, sym2) 8395 v.AddArg(base) 8396 v.AddArg(val) 8397 v.AddArg(mem) 8398 return true 8399 } 8400 return false 8401 } 8402 func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { 8403 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 8404 // cond: is32Bit(off1+off2) 8405 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 8406 for { 8407 off1 := v.AuxInt 8408 sym := v.Aux 8409 _ = v.Args[1] 8410 v_0 := v.Args[0] 8411 if v_0.Op != OpAMD64ADDQconst { 8412 break 8413 } 8414 off2 := v_0.AuxInt 8415 ptr := v_0.Args[0] 8416 mem := 
v.Args[1] 8417 if !(is32Bit(off1 + off2)) { 8418 break 8419 } 8420 v.reset(OpAMD64MOVQatomicload) 8421 v.AuxInt = off1 + off2 8422 v.Aux = sym 8423 v.AddArg(ptr) 8424 v.AddArg(mem) 8425 return true 8426 } 8427 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 8428 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8429 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 8430 for { 8431 off1 := v.AuxInt 8432 sym1 := v.Aux 8433 _ = v.Args[1] 8434 v_0 := v.Args[0] 8435 if v_0.Op != OpAMD64LEAQ { 8436 break 8437 } 8438 off2 := v_0.AuxInt 8439 sym2 := v_0.Aux 8440 ptr := v_0.Args[0] 8441 mem := v.Args[1] 8442 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8443 break 8444 } 8445 v.reset(OpAMD64MOVQatomicload) 8446 v.AuxInt = off1 + off2 8447 v.Aux = mergeSym(sym1, sym2) 8448 v.AddArg(ptr) 8449 v.AddArg(mem) 8450 return true 8451 } 8452 return false 8453 } 8454 func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { 8455 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 8456 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 8457 // result: x 8458 for { 8459 off := v.AuxInt 8460 sym := v.Aux 8461 _ = v.Args[1] 8462 ptr := v.Args[0] 8463 v_1 := v.Args[1] 8464 if v_1.Op != OpAMD64MOVQstore { 8465 break 8466 } 8467 off2 := v_1.AuxInt 8468 sym2 := v_1.Aux 8469 _ = v_1.Args[2] 8470 ptr2 := v_1.Args[0] 8471 x := v_1.Args[1] 8472 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 8473 break 8474 } 8475 v.reset(OpCopy) 8476 v.Type = x.Type 8477 v.AddArg(x) 8478 return true 8479 } 8480 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 8481 // cond: is32Bit(off1+off2) 8482 // result: (MOVQload [off1+off2] {sym} ptr mem) 8483 for { 8484 off1 := v.AuxInt 8485 sym := v.Aux 8486 _ = v.Args[1] 8487 v_0 := v.Args[0] 8488 if v_0.Op != OpAMD64ADDQconst { 8489 break 8490 } 8491 off2 := v_0.AuxInt 8492 ptr := v_0.Args[0] 8493 mem := v.Args[1] 8494 if !(is32Bit(off1 + off2)) { 8495 break 8496 } 8497 v.reset(OpAMD64MOVQload) 8498 v.AuxInt = off1 + off2 8499 v.Aux = sym 8500 v.AddArg(ptr) 8501 v.AddArg(mem) 8502 return true 8503 } 8504 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8505 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8506 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8507 for { 8508 off1 := v.AuxInt 8509 sym1 := v.Aux 8510 _ = v.Args[1] 8511 v_0 := v.Args[0] 8512 if v_0.Op != OpAMD64LEAQ { 8513 break 8514 } 8515 off2 := v_0.AuxInt 8516 sym2 := v_0.Aux 8517 base := v_0.Args[0] 8518 mem := v.Args[1] 8519 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8520 break 8521 } 8522 v.reset(OpAMD64MOVQload) 8523 v.AuxInt = off1 + off2 8524 v.Aux = mergeSym(sym1, sym2) 8525 v.AddArg(base) 8526 v.AddArg(mem) 8527 return true 8528 } 8529 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8530 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8531 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8532 for { 8533 off1 := v.AuxInt 8534 sym1 := v.Aux 8535 _ = v.Args[1] 8536 v_0 := v.Args[0] 8537 if v_0.Op != OpAMD64LEAQ1 { 8538 break 8539 } 8540 off2 := v_0.AuxInt 8541 sym2 := v_0.Aux 8542 _ = v_0.Args[1] 8543 ptr := v_0.Args[0] 8544 idx := v_0.Args[1] 8545 mem := v.Args[1] 8546 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8547 break 8548 } 8549 v.reset(OpAMD64MOVQloadidx1) 8550 v.AuxInt = off1 + off2 8551 v.Aux = mergeSym(sym1, sym2) 8552 v.AddArg(ptr) 8553 v.AddArg(idx) 8554 v.AddArg(mem) 8555 return true 8556 } 8557 
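// Worked example (illustrative, schematic SSA notation) for the LEAQ1 rule
// just above:
//
//	v1 = LEAQ1 [16] {sym} ptr idx    // ptr + idx + 16
//	v2 = MOVQload [8] {} v1 mem
//
// folds to (MOVQloadidx1 [24] {sym} ptr idx mem): one load using the full
// base+index+disp addressing mode, provided is32Bit(24) holds and at most
// one of the two symbols is non-nil (what canMergeSym checks).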
// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 8558 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8559 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8560 for { 8561 off1 := v.AuxInt 8562 sym1 := v.Aux 8563 _ = v.Args[1] 8564 v_0 := v.Args[0] 8565 if v_0.Op != OpAMD64LEAQ8 { 8566 break 8567 } 8568 off2 := v_0.AuxInt 8569 sym2 := v_0.Aux 8570 _ = v_0.Args[1] 8571 ptr := v_0.Args[0] 8572 idx := v_0.Args[1] 8573 mem := v.Args[1] 8574 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8575 break 8576 } 8577 v.reset(OpAMD64MOVQloadidx8) 8578 v.AuxInt = off1 + off2 8579 v.Aux = mergeSym(sym1, sym2) 8580 v.AddArg(ptr) 8581 v.AddArg(idx) 8582 v.AddArg(mem) 8583 return true 8584 } 8585 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 8586 // cond: ptr.Op != OpSB 8587 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 8588 for { 8589 off := v.AuxInt 8590 sym := v.Aux 8591 _ = v.Args[1] 8592 v_0 := v.Args[0] 8593 if v_0.Op != OpAMD64ADDQ { 8594 break 8595 } 8596 _ = v_0.Args[1] 8597 ptr := v_0.Args[0] 8598 idx := v_0.Args[1] 8599 mem := v.Args[1] 8600 if !(ptr.Op != OpSB) { 8601 break 8602 } 8603 v.reset(OpAMD64MOVQloadidx1) 8604 v.AuxInt = off 8605 v.Aux = sym 8606 v.AddArg(ptr) 8607 v.AddArg(idx) 8608 v.AddArg(mem) 8609 return true 8610 } 8611 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 8612 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 8613 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8614 for { 8615 off1 := v.AuxInt 8616 sym1 := v.Aux 8617 _ = v.Args[1] 8618 v_0 := v.Args[0] 8619 if v_0.Op != OpAMD64LEAL { 8620 break 8621 } 8622 off2 := v_0.AuxInt 8623 sym2 := v_0.Aux 8624 base := v_0.Args[0] 8625 mem := v.Args[1] 8626 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 8627 break 8628 } 8629 v.reset(OpAMD64MOVQload) 8630 v.AuxInt = off1 + off2 8631 v.Aux = mergeSym(sym1, sym2) 8632 v.AddArg(base) 8633 v.AddArg(mem) 8634 return true 8635 } 8636 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 8637 // cond: is32Bit(off1+off2) 8638 // result: (MOVQload [off1+off2] {sym} ptr mem) 8639 for { 8640 off1 := v.AuxInt 8641 sym := v.Aux 8642 _ = v.Args[1] 8643 v_0 := v.Args[0] 8644 if v_0.Op != OpAMD64ADDLconst { 8645 break 8646 } 8647 off2 := v_0.AuxInt 8648 ptr := v_0.Args[0] 8649 mem := v.Args[1] 8650 if !(is32Bit(off1 + off2)) { 8651 break 8652 } 8653 v.reset(OpAMD64MOVQload) 8654 v.AuxInt = off1 + off2 8655 v.Aux = sym 8656 v.AddArg(ptr) 8657 v.AddArg(mem) 8658 return true 8659 } 8660 return false 8661 } 8662 func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { 8663 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 8664 // cond: 8665 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 8666 for { 8667 c := v.AuxInt 8668 sym := v.Aux 8669 _ = v.Args[2] 8670 ptr := v.Args[0] 8671 v_1 := v.Args[1] 8672 if v_1.Op != OpAMD64SHLQconst { 8673 break 8674 } 8675 if v_1.AuxInt != 3 { 8676 break 8677 } 8678 idx := v_1.Args[0] 8679 mem := v.Args[2] 8680 v.reset(OpAMD64MOVQloadidx8) 8681 v.AuxInt = c 8682 v.Aux = sym 8683 v.AddArg(ptr) 8684 v.AddArg(idx) 8685 v.AddArg(mem) 8686 return true 8687 } 8688 // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 8689 // cond: 8690 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 8691 for { 8692 c := v.AuxInt 8693 sym := v.Aux 8694 _ = v.Args[2] 8695 v_0 := v.Args[0] 8696 if v_0.Op != OpAMD64SHLQconst { 8697 break 8698 } 8699 if v_0.AuxInt != 3 { 8700 break 8701 } 8702 idx := v_0.Args[0] 8703 ptr := v.Args[1] 8704 mem := 
v.Args[2] 8705 v.reset(OpAMD64MOVQloadidx8) 8706 v.AuxInt = c 8707 v.Aux = sym 8708 v.AddArg(ptr) 8709 v.AddArg(idx) 8710 v.AddArg(mem) 8711 return true 8712 } 8713 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8714 // cond: is32Bit(c+d) 8715 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8716 for { 8717 c := v.AuxInt 8718 sym := v.Aux 8719 _ = v.Args[2] 8720 v_0 := v.Args[0] 8721 if v_0.Op != OpAMD64ADDQconst { 8722 break 8723 } 8724 d := v_0.AuxInt 8725 ptr := v_0.Args[0] 8726 idx := v.Args[1] 8727 mem := v.Args[2] 8728 if !(is32Bit(c + d)) { 8729 break 8730 } 8731 v.reset(OpAMD64MOVQloadidx1) 8732 v.AuxInt = c + d 8733 v.Aux = sym 8734 v.AddArg(ptr) 8735 v.AddArg(idx) 8736 v.AddArg(mem) 8737 return true 8738 } 8739 // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 8740 // cond: is32Bit(c+d) 8741 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8742 for { 8743 c := v.AuxInt 8744 sym := v.Aux 8745 _ = v.Args[2] 8746 idx := v.Args[0] 8747 v_1 := v.Args[1] 8748 if v_1.Op != OpAMD64ADDQconst { 8749 break 8750 } 8751 d := v_1.AuxInt 8752 ptr := v_1.Args[0] 8753 mem := v.Args[2] 8754 if !(is32Bit(c + d)) { 8755 break 8756 } 8757 v.reset(OpAMD64MOVQloadidx1) 8758 v.AuxInt = c + d 8759 v.Aux = sym 8760 v.AddArg(ptr) 8761 v.AddArg(idx) 8762 v.AddArg(mem) 8763 return true 8764 } 8765 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8766 // cond: is32Bit(c+d) 8767 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8768 for { 8769 c := v.AuxInt 8770 sym := v.Aux 8771 _ = v.Args[2] 8772 ptr := v.Args[0] 8773 v_1 := v.Args[1] 8774 if v_1.Op != OpAMD64ADDQconst { 8775 break 8776 } 8777 d := v_1.AuxInt 8778 idx := v_1.Args[0] 8779 mem := v.Args[2] 8780 if !(is32Bit(c + d)) { 8781 break 8782 } 8783 v.reset(OpAMD64MOVQloadidx1) 8784 v.AuxInt = c + d 8785 v.Aux = sym 8786 v.AddArg(ptr) 8787 v.AddArg(idx) 8788 v.AddArg(mem) 8789 return true 8790 } 8791 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 8792 // cond: is32Bit(c+d) 8793 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8794 for { 8795 c := v.AuxInt 8796 sym := v.Aux 8797 _ = v.Args[2] 8798 v_0 := v.Args[0] 8799 if v_0.Op != OpAMD64ADDQconst { 8800 break 8801 } 8802 d := v_0.AuxInt 8803 idx := v_0.Args[0] 8804 ptr := v.Args[1] 8805 mem := v.Args[2] 8806 if !(is32Bit(c + d)) { 8807 break 8808 } 8809 v.reset(OpAMD64MOVQloadidx1) 8810 v.AuxInt = c + d 8811 v.Aux = sym 8812 v.AddArg(ptr) 8813 v.AddArg(idx) 8814 v.AddArg(mem) 8815 return true 8816 } 8817 return false 8818 } 8819 func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { 8820 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 8821 // cond: is32Bit(c+d) 8822 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 8823 for { 8824 c := v.AuxInt 8825 sym := v.Aux 8826 _ = v.Args[2] 8827 v_0 := v.Args[0] 8828 if v_0.Op != OpAMD64ADDQconst { 8829 break 8830 } 8831 d := v_0.AuxInt 8832 ptr := v_0.Args[0] 8833 idx := v.Args[1] 8834 mem := v.Args[2] 8835 if !(is32Bit(c + d)) { 8836 break 8837 } 8838 v.reset(OpAMD64MOVQloadidx8) 8839 v.AuxInt = c + d 8840 v.Aux = sym 8841 v.AddArg(ptr) 8842 v.AddArg(idx) 8843 v.AddArg(mem) 8844 return true 8845 } 8846 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 8847 // cond: is32Bit(c+8*d) 8848 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 8849 for { 8850 c := v.AuxInt 8851 sym := v.Aux 8852 _ = v.Args[2] 8853 ptr := v.Args[0] 8854 v_1 := v.Args[1] 8855 if v_1.Op != OpAMD64ADDQconst { 8856 break 8857 } 8858 d := v_1.AuxInt 8859 idx := v_1.Args[0] 8860 mem := v.Args[2] 
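// Illustrative arithmetic: with a scale of 8 the effective address is
// ptr + 8*(idx+d) + c = ptr + 8*idx + (c + 8*d), so the displacement must
// absorb 8*d — hence the is32Bit(c+8*d) guard below rather than
// is32Bit(c+d).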
8861 if !(is32Bit(c + 8*d)) { 8862 break 8863 } 8864 v.reset(OpAMD64MOVQloadidx8) 8865 v.AuxInt = c + 8*d 8866 v.Aux = sym 8867 v.AddArg(ptr) 8868 v.AddArg(idx) 8869 v.AddArg(mem) 8870 return true 8871 } 8872 return false 8873 } 8874 func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { 8875 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8876 // cond: is32Bit(off1+off2) 8877 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 8878 for { 8879 off1 := v.AuxInt 8880 sym := v.Aux 8881 _ = v.Args[2] 8882 v_0 := v.Args[0] 8883 if v_0.Op != OpAMD64ADDQconst { 8884 break 8885 } 8886 off2 := v_0.AuxInt 8887 ptr := v_0.Args[0] 8888 val := v.Args[1] 8889 mem := v.Args[2] 8890 if !(is32Bit(off1 + off2)) { 8891 break 8892 } 8893 v.reset(OpAMD64MOVQstore) 8894 v.AuxInt = off1 + off2 8895 v.Aux = sym 8896 v.AddArg(ptr) 8897 v.AddArg(val) 8898 v.AddArg(mem) 8899 return true 8900 } 8901 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 8902 // cond: validValAndOff(c,off) 8903 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 8904 for { 8905 off := v.AuxInt 8906 sym := v.Aux 8907 _ = v.Args[2] 8908 ptr := v.Args[0] 8909 v_1 := v.Args[1] 8910 if v_1.Op != OpAMD64MOVQconst { 8911 break 8912 } 8913 c := v_1.AuxInt 8914 mem := v.Args[2] 8915 if !(validValAndOff(c, off)) { 8916 break 8917 } 8918 v.reset(OpAMD64MOVQstoreconst) 8919 v.AuxInt = makeValAndOff(c, off) 8920 v.Aux = sym 8921 v.AddArg(ptr) 8922 v.AddArg(mem) 8923 return true 8924 } 8925 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8926 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8927 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8928 for { 8929 off1 := v.AuxInt 8930 sym1 := v.Aux 8931 _ = v.Args[2] 8932 v_0 := v.Args[0] 8933 if v_0.Op != OpAMD64LEAQ { 8934 break 8935 } 8936 off2 := v_0.AuxInt 8937 sym2 := v_0.Aux 8938 base := v_0.Args[0] 8939 val := v.Args[1] 8940 mem := v.Args[2] 8941 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8942 break 8943 } 8944 v.reset(OpAMD64MOVQstore) 8945 v.AuxInt = off1 + off2 8946 v.Aux = mergeSym(sym1, sym2) 8947 v.AddArg(base) 8948 v.AddArg(val) 8949 v.AddArg(mem) 8950 return true 8951 } 8952 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8953 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8954 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8955 for { 8956 off1 := v.AuxInt 8957 sym1 := v.Aux 8958 _ = v.Args[2] 8959 v_0 := v.Args[0] 8960 if v_0.Op != OpAMD64LEAQ1 { 8961 break 8962 } 8963 off2 := v_0.AuxInt 8964 sym2 := v_0.Aux 8965 _ = v_0.Args[1] 8966 ptr := v_0.Args[0] 8967 idx := v_0.Args[1] 8968 val := v.Args[1] 8969 mem := v.Args[2] 8970 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8971 break 8972 } 8973 v.reset(OpAMD64MOVQstoreidx1) 8974 v.AuxInt = off1 + off2 8975 v.Aux = mergeSym(sym1, sym2) 8976 v.AddArg(ptr) 8977 v.AddArg(idx) 8978 v.AddArg(val) 8979 v.AddArg(mem) 8980 return true 8981 } 8982 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 8983 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8984 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8985 for { 8986 off1 := v.AuxInt 8987 sym1 := v.Aux 8988 _ = v.Args[2] 8989 v_0 := v.Args[0] 8990 if v_0.Op != OpAMD64LEAQ8 { 8991 break 8992 } 8993 off2 := v_0.AuxInt 8994 sym2 := v_0.Aux 8995 _ = v_0.Args[1] 8996 ptr := v_0.Args[0] 8997 idx := v_0.Args[1] 8998 val := v.Args[1] 8999 mem := v.Args[2] 9000 if 
!(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9001 break 9002 } 9003 v.reset(OpAMD64MOVQstoreidx8) 9004 v.AuxInt = off1 + off2 9005 v.Aux = mergeSym(sym1, sym2) 9006 v.AddArg(ptr) 9007 v.AddArg(idx) 9008 v.AddArg(val) 9009 v.AddArg(mem) 9010 return true 9011 } 9012 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 9013 // cond: ptr.Op != OpSB 9014 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 9015 for { 9016 off := v.AuxInt 9017 sym := v.Aux 9018 _ = v.Args[2] 9019 v_0 := v.Args[0] 9020 if v_0.Op != OpAMD64ADDQ { 9021 break 9022 } 9023 _ = v_0.Args[1] 9024 ptr := v_0.Args[0] 9025 idx := v_0.Args[1] 9026 val := v.Args[1] 9027 mem := v.Args[2] 9028 if !(ptr.Op != OpSB) { 9029 break 9030 } 9031 v.reset(OpAMD64MOVQstoreidx1) 9032 v.AuxInt = off 9033 v.Aux = sym 9034 v.AddArg(ptr) 9035 v.AddArg(idx) 9036 v.AddArg(val) 9037 v.AddArg(mem) 9038 return true 9039 } 9040 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 9041 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 9042 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9043 for { 9044 off1 := v.AuxInt 9045 sym1 := v.Aux 9046 _ = v.Args[2] 9047 v_0 := v.Args[0] 9048 if v_0.Op != OpAMD64LEAL { 9049 break 9050 } 9051 off2 := v_0.AuxInt 9052 sym2 := v_0.Aux 9053 base := v_0.Args[0] 9054 val := v.Args[1] 9055 mem := v.Args[2] 9056 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 9057 break 9058 } 9059 v.reset(OpAMD64MOVQstore) 9060 v.AuxInt = off1 + off2 9061 v.Aux = mergeSym(sym1, sym2) 9062 v.AddArg(base) 9063 v.AddArg(val) 9064 v.AddArg(mem) 9065 return true 9066 } 9067 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 9068 // cond: is32Bit(off1+off2) 9069 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 9070 for { 9071 off1 := v.AuxInt 9072 sym := v.Aux 9073 _ = v.Args[2] 9074 v_0 := v.Args[0] 9075 if v_0.Op != OpAMD64ADDLconst { 9076 break 9077 } 9078 off2 := v_0.AuxInt 9079 ptr := v_0.Args[0] 9080 val := v.Args[1] 9081 mem := v.Args[2] 9082 if !(is32Bit(off1 + off2)) { 9083 break 9084 } 9085 v.reset(OpAMD64MOVQstore) 9086 v.AuxInt = off1 + off2 9087 v.Aux = sym 9088 v.AddArg(ptr) 9089 v.AddArg(val) 9090 v.AddArg(mem) 9091 return true 9092 } 9093 return false 9094 } 9095 func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { 9096 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 9097 // cond: ValAndOff(sc).canAdd(off) 9098 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9099 for { 9100 sc := v.AuxInt 9101 s := v.Aux 9102 _ = v.Args[1] 9103 v_0 := v.Args[0] 9104 if v_0.Op != OpAMD64ADDQconst { 9105 break 9106 } 9107 off := v_0.AuxInt 9108 ptr := v_0.Args[0] 9109 mem := v.Args[1] 9110 if !(ValAndOff(sc).canAdd(off)) { 9111 break 9112 } 9113 v.reset(OpAMD64MOVQstoreconst) 9114 v.AuxInt = ValAndOff(sc).add(off) 9115 v.Aux = s 9116 v.AddArg(ptr) 9117 v.AddArg(mem) 9118 return true 9119 } 9120 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 9121 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9122 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9123 for { 9124 sc := v.AuxInt 9125 sym1 := v.Aux 9126 _ = v.Args[1] 9127 v_0 := v.Args[0] 9128 if v_0.Op != OpAMD64LEAQ { 9129 break 9130 } 9131 off := v_0.AuxInt 9132 sym2 := v_0.Aux 9133 ptr := v_0.Args[0] 9134 mem := v.Args[1] 9135 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9136 break 9137 } 9138 v.reset(OpAMD64MOVQstoreconst) 9139 v.AuxInt = ValAndOff(sc).add(off) 9140 v.Aux = 
mergeSym(sym1, sym2) 9141 v.AddArg(ptr) 9142 v.AddArg(mem) 9143 return true 9144 } 9145 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 9146 // cond: canMergeSym(sym1, sym2) 9147 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9148 for { 9149 x := v.AuxInt 9150 sym1 := v.Aux 9151 _ = v.Args[1] 9152 v_0 := v.Args[0] 9153 if v_0.Op != OpAMD64LEAQ1 { 9154 break 9155 } 9156 off := v_0.AuxInt 9157 sym2 := v_0.Aux 9158 _ = v_0.Args[1] 9159 ptr := v_0.Args[0] 9160 idx := v_0.Args[1] 9161 mem := v.Args[1] 9162 if !(canMergeSym(sym1, sym2)) { 9163 break 9164 } 9165 v.reset(OpAMD64MOVQstoreconstidx1) 9166 v.AuxInt = ValAndOff(x).add(off) 9167 v.Aux = mergeSym(sym1, sym2) 9168 v.AddArg(ptr) 9169 v.AddArg(idx) 9170 v.AddArg(mem) 9171 return true 9172 } 9173 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 9174 // cond: canMergeSym(sym1, sym2) 9175 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9176 for { 9177 x := v.AuxInt 9178 sym1 := v.Aux 9179 _ = v.Args[1] 9180 v_0 := v.Args[0] 9181 if v_0.Op != OpAMD64LEAQ8 { 9182 break 9183 } 9184 off := v_0.AuxInt 9185 sym2 := v_0.Aux 9186 _ = v_0.Args[1] 9187 ptr := v_0.Args[0] 9188 idx := v_0.Args[1] 9189 mem := v.Args[1] 9190 if !(canMergeSym(sym1, sym2)) { 9191 break 9192 } 9193 v.reset(OpAMD64MOVQstoreconstidx8) 9194 v.AuxInt = ValAndOff(x).add(off) 9195 v.Aux = mergeSym(sym1, sym2) 9196 v.AddArg(ptr) 9197 v.AddArg(idx) 9198 v.AddArg(mem) 9199 return true 9200 } 9201 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 9202 // cond: 9203 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 9204 for { 9205 x := v.AuxInt 9206 sym := v.Aux 9207 _ = v.Args[1] 9208 v_0 := v.Args[0] 9209 if v_0.Op != OpAMD64ADDQ { 9210 break 9211 } 9212 _ = v_0.Args[1] 9213 ptr := v_0.Args[0] 9214 idx := v_0.Args[1] 9215 mem := v.Args[1] 9216 v.reset(OpAMD64MOVQstoreconstidx1) 9217 v.AuxInt = x 9218 v.Aux = sym 9219 v.AddArg(ptr) 9220 v.AddArg(idx) 9221 v.AddArg(mem) 9222 return true 9223 } 9224 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 9225 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9226 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9227 for { 9228 sc := v.AuxInt 9229 sym1 := v.Aux 9230 _ = v.Args[1] 9231 v_0 := v.Args[0] 9232 if v_0.Op != OpAMD64LEAL { 9233 break 9234 } 9235 off := v_0.AuxInt 9236 sym2 := v_0.Aux 9237 ptr := v_0.Args[0] 9238 mem := v.Args[1] 9239 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9240 break 9241 } 9242 v.reset(OpAMD64MOVQstoreconst) 9243 v.AuxInt = ValAndOff(sc).add(off) 9244 v.Aux = mergeSym(sym1, sym2) 9245 v.AddArg(ptr) 9246 v.AddArg(mem) 9247 return true 9248 } 9249 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 9250 // cond: ValAndOff(sc).canAdd(off) 9251 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9252 for { 9253 sc := v.AuxInt 9254 s := v.Aux 9255 _ = v.Args[1] 9256 v_0 := v.Args[0] 9257 if v_0.Op != OpAMD64ADDLconst { 9258 break 9259 } 9260 off := v_0.AuxInt 9261 ptr := v_0.Args[0] 9262 mem := v.Args[1] 9263 if !(ValAndOff(sc).canAdd(off)) { 9264 break 9265 } 9266 v.reset(OpAMD64MOVQstoreconst) 9267 v.AuxInt = ValAndOff(sc).add(off) 9268 v.Aux = s 9269 v.AddArg(ptr) 9270 v.AddArg(mem) 9271 return true 9272 } 9273 return false 9274 } 9275 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { 9276 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 
9277 // cond: 9278 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 9279 for { 9280 c := v.AuxInt 9281 sym := v.Aux 9282 _ = v.Args[2] 9283 ptr := v.Args[0] 9284 v_1 := v.Args[1] 9285 if v_1.Op != OpAMD64SHLQconst { 9286 break 9287 } 9288 if v_1.AuxInt != 3 { 9289 break 9290 } 9291 idx := v_1.Args[0] 9292 mem := v.Args[2] 9293 v.reset(OpAMD64MOVQstoreconstidx8) 9294 v.AuxInt = c 9295 v.Aux = sym 9296 v.AddArg(ptr) 9297 v.AddArg(idx) 9298 v.AddArg(mem) 9299 return true 9300 } 9301 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 9302 // cond: ValAndOff(x).canAdd(c) 9303 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9304 for { 9305 x := v.AuxInt 9306 sym := v.Aux 9307 _ = v.Args[2] 9308 v_0 := v.Args[0] 9309 if v_0.Op != OpAMD64ADDQconst { 9310 break 9311 } 9312 c := v_0.AuxInt 9313 ptr := v_0.Args[0] 9314 idx := v.Args[1] 9315 mem := v.Args[2] 9316 if !(ValAndOff(x).canAdd(c)) { 9317 break 9318 } 9319 v.reset(OpAMD64MOVQstoreconstidx1) 9320 v.AuxInt = ValAndOff(x).add(c) 9321 v.Aux = sym 9322 v.AddArg(ptr) 9323 v.AddArg(idx) 9324 v.AddArg(mem) 9325 return true 9326 } 9327 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 9328 // cond: ValAndOff(x).canAdd(c) 9329 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9330 for { 9331 x := v.AuxInt 9332 sym := v.Aux 9333 _ = v.Args[2] 9334 ptr := v.Args[0] 9335 v_1 := v.Args[1] 9336 if v_1.Op != OpAMD64ADDQconst { 9337 break 9338 } 9339 c := v_1.AuxInt 9340 idx := v_1.Args[0] 9341 mem := v.Args[2] 9342 if !(ValAndOff(x).canAdd(c)) { 9343 break 9344 } 9345 v.reset(OpAMD64MOVQstoreconstidx1) 9346 v.AuxInt = ValAndOff(x).add(c) 9347 v.Aux = sym 9348 v.AddArg(ptr) 9349 v.AddArg(idx) 9350 v.AddArg(mem) 9351 return true 9352 } 9353 return false 9354 } 9355 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { 9356 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 9357 // cond: ValAndOff(x).canAdd(c) 9358 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9359 for { 9360 x := v.AuxInt 9361 sym := v.Aux 9362 _ = v.Args[2] 9363 v_0 := v.Args[0] 9364 if v_0.Op != OpAMD64ADDQconst { 9365 break 9366 } 9367 c := v_0.AuxInt 9368 ptr := v_0.Args[0] 9369 idx := v.Args[1] 9370 mem := v.Args[2] 9371 if !(ValAndOff(x).canAdd(c)) { 9372 break 9373 } 9374 v.reset(OpAMD64MOVQstoreconstidx8) 9375 v.AuxInt = ValAndOff(x).add(c) 9376 v.Aux = sym 9377 v.AddArg(ptr) 9378 v.AddArg(idx) 9379 v.AddArg(mem) 9380 return true 9381 } 9382 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 9383 // cond: ValAndOff(x).canAdd(8*c) 9384 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 9385 for { 9386 x := v.AuxInt 9387 sym := v.Aux 9388 _ = v.Args[2] 9389 ptr := v.Args[0] 9390 v_1 := v.Args[1] 9391 if v_1.Op != OpAMD64ADDQconst { 9392 break 9393 } 9394 c := v_1.AuxInt 9395 idx := v_1.Args[0] 9396 mem := v.Args[2] 9397 if !(ValAndOff(x).canAdd(8 * c)) { 9398 break 9399 } 9400 v.reset(OpAMD64MOVQstoreconstidx8) 9401 v.AuxInt = ValAndOff(x).add(8 * c) 9402 v.Aux = sym 9403 v.AddArg(ptr) 9404 v.AddArg(idx) 9405 v.AddArg(mem) 9406 return true 9407 } 9408 return false 9409 } 9410 func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { 9411 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 9412 // cond: 9413 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 9414 for { 9415 c := v.AuxInt 9416 sym := v.Aux 9417 _ = v.Args[3] 9418 ptr := v.Args[0] 9419 v_1 := v.Args[1] 9420 if v_1.Op 
!= OpAMD64SHLQconst { 9421 break 9422 } 9423 if v_1.AuxInt != 3 { 9424 break 9425 } 9426 idx := v_1.Args[0] 9427 val := v.Args[2] 9428 mem := v.Args[3] 9429 v.reset(OpAMD64MOVQstoreidx8) 9430 v.AuxInt = c 9431 v.Aux = sym 9432 v.AddArg(ptr) 9433 v.AddArg(idx) 9434 v.AddArg(val) 9435 v.AddArg(mem) 9436 return true 9437 } 9438 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9439 // cond: is32Bit(c+d) 9440 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 9441 for { 9442 c := v.AuxInt 9443 sym := v.Aux 9444 _ = v.Args[3] 9445 v_0 := v.Args[0] 9446 if v_0.Op != OpAMD64ADDQconst { 9447 break 9448 } 9449 d := v_0.AuxInt 9450 ptr := v_0.Args[0] 9451 idx := v.Args[1] 9452 val := v.Args[2] 9453 mem := v.Args[3] 9454 if !(is32Bit(c + d)) { 9455 break 9456 } 9457 v.reset(OpAMD64MOVQstoreidx1) 9458 v.AuxInt = c + d 9459 v.Aux = sym 9460 v.AddArg(ptr) 9461 v.AddArg(idx) 9462 v.AddArg(val) 9463 v.AddArg(mem) 9464 return true 9465 } 9466 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9467 // cond: is32Bit(c+d) 9468 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 9469 for { 9470 c := v.AuxInt 9471 sym := v.Aux 9472 _ = v.Args[3] 9473 ptr := v.Args[0] 9474 v_1 := v.Args[1] 9475 if v_1.Op != OpAMD64ADDQconst { 9476 break 9477 } 9478 d := v_1.AuxInt 9479 idx := v_1.Args[0] 9480 val := v.Args[2] 9481 mem := v.Args[3] 9482 if !(is32Bit(c + d)) { 9483 break 9484 } 9485 v.reset(OpAMD64MOVQstoreidx1) 9486 v.AuxInt = c + d 9487 v.Aux = sym 9488 v.AddArg(ptr) 9489 v.AddArg(idx) 9490 v.AddArg(val) 9491 v.AddArg(mem) 9492 return true 9493 } 9494 return false 9495 } 9496 func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { 9497 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9498 // cond: is32Bit(c+d) 9499 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 9500 for { 9501 c := v.AuxInt 9502 sym := v.Aux 9503 _ = v.Args[3] 9504 v_0 := v.Args[0] 9505 if v_0.Op != OpAMD64ADDQconst { 9506 break 9507 } 9508 d := v_0.AuxInt 9509 ptr := v_0.Args[0] 9510 idx := v.Args[1] 9511 val := v.Args[2] 9512 mem := v.Args[3] 9513 if !(is32Bit(c + d)) { 9514 break 9515 } 9516 v.reset(OpAMD64MOVQstoreidx8) 9517 v.AuxInt = c + d 9518 v.Aux = sym 9519 v.AddArg(ptr) 9520 v.AddArg(idx) 9521 v.AddArg(val) 9522 v.AddArg(mem) 9523 return true 9524 } 9525 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9526 // cond: is32Bit(c+8*d) 9527 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 9528 for { 9529 c := v.AuxInt 9530 sym := v.Aux 9531 _ = v.Args[3] 9532 ptr := v.Args[0] 9533 v_1 := v.Args[1] 9534 if v_1.Op != OpAMD64ADDQconst { 9535 break 9536 } 9537 d := v_1.AuxInt 9538 idx := v_1.Args[0] 9539 val := v.Args[2] 9540 mem := v.Args[3] 9541 if !(is32Bit(c + 8*d)) { 9542 break 9543 } 9544 v.reset(OpAMD64MOVQstoreidx8) 9545 v.AuxInt = c + 8*d 9546 v.Aux = sym 9547 v.AddArg(ptr) 9548 v.AddArg(idx) 9549 v.AddArg(val) 9550 v.AddArg(mem) 9551 return true 9552 } 9553 return false 9554 } 9555 func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { 9556 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 9557 // cond: is32Bit(off1+off2) 9558 // result: (MOVSDload [off1+off2] {sym} ptr mem) 9559 for { 9560 off1 := v.AuxInt 9561 sym := v.Aux 9562 _ = v.Args[1] 9563 v_0 := v.Args[0] 9564 if v_0.Op != OpAMD64ADDQconst { 9565 break 9566 } 9567 off2 := v_0.AuxInt 9568 ptr := v_0.Args[0] 9569 mem := v.Args[1] 9570 if !(is32Bit(off1 + off2)) { 9571 break 9572 } 9573 v.reset(OpAMD64MOVSDload) 9574 v.AuxInt = off1 + off2 9575 v.Aux = sym 
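// Note (illustrative): the is32Bit guard checked above exists because an
// AMD64 memory operand carries a signed 32-bit displacement; e.g.
// off1 = 0x7fffffff with off2 = 1 would overflow int32, so the rule
// declines and the ADDQconst remains a separate instruction.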
9576 v.AddArg(ptr) 9577 v.AddArg(mem) 9578 return true 9579 } 9580 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9581 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9582 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9583 for { 9584 off1 := v.AuxInt 9585 sym1 := v.Aux 9586 _ = v.Args[1] 9587 v_0 := v.Args[0] 9588 if v_0.Op != OpAMD64LEAQ { 9589 break 9590 } 9591 off2 := v_0.AuxInt 9592 sym2 := v_0.Aux 9593 base := v_0.Args[0] 9594 mem := v.Args[1] 9595 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9596 break 9597 } 9598 v.reset(OpAMD64MOVSDload) 9599 v.AuxInt = off1 + off2 9600 v.Aux = mergeSym(sym1, sym2) 9601 v.AddArg(base) 9602 v.AddArg(mem) 9603 return true 9604 } 9605 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9606 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9607 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9608 for { 9609 off1 := v.AuxInt 9610 sym1 := v.Aux 9611 _ = v.Args[1] 9612 v_0 := v.Args[0] 9613 if v_0.Op != OpAMD64LEAQ1 { 9614 break 9615 } 9616 off2 := v_0.AuxInt 9617 sym2 := v_0.Aux 9618 _ = v_0.Args[1] 9619 ptr := v_0.Args[0] 9620 idx := v_0.Args[1] 9621 mem := v.Args[1] 9622 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9623 break 9624 } 9625 v.reset(OpAMD64MOVSDloadidx1) 9626 v.AuxInt = off1 + off2 9627 v.Aux = mergeSym(sym1, sym2) 9628 v.AddArg(ptr) 9629 v.AddArg(idx) 9630 v.AddArg(mem) 9631 return true 9632 } 9633 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 9634 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9635 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9636 for { 9637 off1 := v.AuxInt 9638 sym1 := v.Aux 9639 _ = v.Args[1] 9640 v_0 := v.Args[0] 9641 if v_0.Op != OpAMD64LEAQ8 { 9642 break 9643 } 9644 off2 := v_0.AuxInt 9645 sym2 := v_0.Aux 9646 _ = v_0.Args[1] 9647 ptr := v_0.Args[0] 9648 idx := v_0.Args[1] 9649 mem := v.Args[1] 9650 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9651 break 9652 } 9653 v.reset(OpAMD64MOVSDloadidx8) 9654 v.AuxInt = off1 + off2 9655 v.Aux = mergeSym(sym1, sym2) 9656 v.AddArg(ptr) 9657 v.AddArg(idx) 9658 v.AddArg(mem) 9659 return true 9660 } 9661 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 9662 // cond: ptr.Op != OpSB 9663 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 9664 for { 9665 off := v.AuxInt 9666 sym := v.Aux 9667 _ = v.Args[1] 9668 v_0 := v.Args[0] 9669 if v_0.Op != OpAMD64ADDQ { 9670 break 9671 } 9672 _ = v_0.Args[1] 9673 ptr := v_0.Args[0] 9674 idx := v_0.Args[1] 9675 mem := v.Args[1] 9676 if !(ptr.Op != OpSB) { 9677 break 9678 } 9679 v.reset(OpAMD64MOVSDloadidx1) 9680 v.AuxInt = off 9681 v.Aux = sym 9682 v.AddArg(ptr) 9683 v.AddArg(idx) 9684 v.AddArg(mem) 9685 return true 9686 } 9687 return false 9688 } 9689 func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { 9690 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 9691 // cond: 9692 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 9693 for { 9694 c := v.AuxInt 9695 sym := v.Aux 9696 _ = v.Args[2] 9697 ptr := v.Args[0] 9698 v_1 := v.Args[1] 9699 if v_1.Op != OpAMD64SHLQconst { 9700 break 9701 } 9702 if v_1.AuxInt != 3 { 9703 break 9704 } 9705 idx := v_1.Args[0] 9706 mem := v.Args[2] 9707 v.reset(OpAMD64MOVSDloadidx8) 9708 v.AuxInt = c 9709 v.Aux = sym 9710 v.AddArg(ptr) 9711 v.AddArg(idx) 9712 v.AddArg(mem) 9713 return true 9714 } 9715 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9716 // cond: is32Bit(c+d) 9717 // result: 
(MOVSDloadidx1 [c+d] {sym} ptr idx mem) 9718 for { 9719 c := v.AuxInt 9720 sym := v.Aux 9721 _ = v.Args[2] 9722 v_0 := v.Args[0] 9723 if v_0.Op != OpAMD64ADDQconst { 9724 break 9725 } 9726 d := v_0.AuxInt 9727 ptr := v_0.Args[0] 9728 idx := v.Args[1] 9729 mem := v.Args[2] 9730 if !(is32Bit(c + d)) { 9731 break 9732 } 9733 v.reset(OpAMD64MOVSDloadidx1) 9734 v.AuxInt = c + d 9735 v.Aux = sym 9736 v.AddArg(ptr) 9737 v.AddArg(idx) 9738 v.AddArg(mem) 9739 return true 9740 } 9741 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9742 // cond: is32Bit(c+d) 9743 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 9744 for { 9745 c := v.AuxInt 9746 sym := v.Aux 9747 _ = v.Args[2] 9748 ptr := v.Args[0] 9749 v_1 := v.Args[1] 9750 if v_1.Op != OpAMD64ADDQconst { 9751 break 9752 } 9753 d := v_1.AuxInt 9754 idx := v_1.Args[0] 9755 mem := v.Args[2] 9756 if !(is32Bit(c + d)) { 9757 break 9758 } 9759 v.reset(OpAMD64MOVSDloadidx1) 9760 v.AuxInt = c + d 9761 v.Aux = sym 9762 v.AddArg(ptr) 9763 v.AddArg(idx) 9764 v.AddArg(mem) 9765 return true 9766 } 9767 return false 9768 } 9769 func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { 9770 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 9771 // cond: is32Bit(c+d) 9772 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 9773 for { 9774 c := v.AuxInt 9775 sym := v.Aux 9776 _ = v.Args[2] 9777 v_0 := v.Args[0] 9778 if v_0.Op != OpAMD64ADDQconst { 9779 break 9780 } 9781 d := v_0.AuxInt 9782 ptr := v_0.Args[0] 9783 idx := v.Args[1] 9784 mem := v.Args[2] 9785 if !(is32Bit(c + d)) { 9786 break 9787 } 9788 v.reset(OpAMD64MOVSDloadidx8) 9789 v.AuxInt = c + d 9790 v.Aux = sym 9791 v.AddArg(ptr) 9792 v.AddArg(idx) 9793 v.AddArg(mem) 9794 return true 9795 } 9796 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 9797 // cond: is32Bit(c+8*d) 9798 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 9799 for { 9800 c := v.AuxInt 9801 sym := v.Aux 9802 _ = v.Args[2] 9803 ptr := v.Args[0] 9804 v_1 := v.Args[1] 9805 if v_1.Op != OpAMD64ADDQconst { 9806 break 9807 } 9808 d := v_1.AuxInt 9809 idx := v_1.Args[0] 9810 mem := v.Args[2] 9811 if !(is32Bit(c + 8*d)) { 9812 break 9813 } 9814 v.reset(OpAMD64MOVSDloadidx8) 9815 v.AuxInt = c + 8*d 9816 v.Aux = sym 9817 v.AddArg(ptr) 9818 v.AddArg(idx) 9819 v.AddArg(mem) 9820 return true 9821 } 9822 return false 9823 } 9824 func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { 9825 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9826 // cond: is32Bit(off1+off2) 9827 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 9828 for { 9829 off1 := v.AuxInt 9830 sym := v.Aux 9831 _ = v.Args[2] 9832 v_0 := v.Args[0] 9833 if v_0.Op != OpAMD64ADDQconst { 9834 break 9835 } 9836 off2 := v_0.AuxInt 9837 ptr := v_0.Args[0] 9838 val := v.Args[1] 9839 mem := v.Args[2] 9840 if !(is32Bit(off1 + off2)) { 9841 break 9842 } 9843 v.reset(OpAMD64MOVSDstore) 9844 v.AuxInt = off1 + off2 9845 v.Aux = sym 9846 v.AddArg(ptr) 9847 v.AddArg(val) 9848 v.AddArg(mem) 9849 return true 9850 } 9851 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9852 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9853 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9854 for { 9855 off1 := v.AuxInt 9856 sym1 := v.Aux 9857 _ = v.Args[2] 9858 v_0 := v.Args[0] 9859 if v_0.Op != OpAMD64LEAQ { 9860 break 9861 } 9862 off2 := v_0.AuxInt 9863 sym2 := v_0.Aux 9864 base := v_0.Args[0] 9865 val := v.Args[1] 9866 mem := v.Args[2] 9867 if !(is32Bit(off1+off2) && canMergeSym(sym1, 
sym2)) { 9868 break 9869 } 9870 v.reset(OpAMD64MOVSDstore) 9871 v.AuxInt = off1 + off2 9872 v.Aux = mergeSym(sym1, sym2) 9873 v.AddArg(base) 9874 v.AddArg(val) 9875 v.AddArg(mem) 9876 return true 9877 } 9878 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9879 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9880 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9881 for { 9882 off1 := v.AuxInt 9883 sym1 := v.Aux 9884 _ = v.Args[2] 9885 v_0 := v.Args[0] 9886 if v_0.Op != OpAMD64LEAQ1 { 9887 break 9888 } 9889 off2 := v_0.AuxInt 9890 sym2 := v_0.Aux 9891 _ = v_0.Args[1] 9892 ptr := v_0.Args[0] 9893 idx := v_0.Args[1] 9894 val := v.Args[1] 9895 mem := v.Args[2] 9896 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9897 break 9898 } 9899 v.reset(OpAMD64MOVSDstoreidx1) 9900 v.AuxInt = off1 + off2 9901 v.Aux = mergeSym(sym1, sym2) 9902 v.AddArg(ptr) 9903 v.AddArg(idx) 9904 v.AddArg(val) 9905 v.AddArg(mem) 9906 return true 9907 } 9908 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 9909 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9910 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9911 for { 9912 off1 := v.AuxInt 9913 sym1 := v.Aux 9914 _ = v.Args[2] 9915 v_0 := v.Args[0] 9916 if v_0.Op != OpAMD64LEAQ8 { 9917 break 9918 } 9919 off2 := v_0.AuxInt 9920 sym2 := v_0.Aux 9921 _ = v_0.Args[1] 9922 ptr := v_0.Args[0] 9923 idx := v_0.Args[1] 9924 val := v.Args[1] 9925 mem := v.Args[2] 9926 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9927 break 9928 } 9929 v.reset(OpAMD64MOVSDstoreidx8) 9930 v.AuxInt = off1 + off2 9931 v.Aux = mergeSym(sym1, sym2) 9932 v.AddArg(ptr) 9933 v.AddArg(idx) 9934 v.AddArg(val) 9935 v.AddArg(mem) 9936 return true 9937 } 9938 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 9939 // cond: ptr.Op != OpSB 9940 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 9941 for { 9942 off := v.AuxInt 9943 sym := v.Aux 9944 _ = v.Args[2] 9945 v_0 := v.Args[0] 9946 if v_0.Op != OpAMD64ADDQ { 9947 break 9948 } 9949 _ = v_0.Args[1] 9950 ptr := v_0.Args[0] 9951 idx := v_0.Args[1] 9952 val := v.Args[1] 9953 mem := v.Args[2] 9954 if !(ptr.Op != OpSB) { 9955 break 9956 } 9957 v.reset(OpAMD64MOVSDstoreidx1) 9958 v.AuxInt = off 9959 v.Aux = sym 9960 v.AddArg(ptr) 9961 v.AddArg(idx) 9962 v.AddArg(val) 9963 v.AddArg(mem) 9964 return true 9965 } 9966 return false 9967 } 9968 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { 9969 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 9970 // cond: 9971 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 9972 for { 9973 c := v.AuxInt 9974 sym := v.Aux 9975 _ = v.Args[3] 9976 ptr := v.Args[0] 9977 v_1 := v.Args[1] 9978 if v_1.Op != OpAMD64SHLQconst { 9979 break 9980 } 9981 if v_1.AuxInt != 3 { 9982 break 9983 } 9984 idx := v_1.Args[0] 9985 val := v.Args[2] 9986 mem := v.Args[3] 9987 v.reset(OpAMD64MOVSDstoreidx8) 9988 v.AuxInt = c 9989 v.Aux = sym 9990 v.AddArg(ptr) 9991 v.AddArg(idx) 9992 v.AddArg(val) 9993 v.AddArg(mem) 9994 return true 9995 } 9996 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9997 // cond: is32Bit(c+d) 9998 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 9999 for { 10000 c := v.AuxInt 10001 sym := v.Aux 10002 _ = v.Args[3] 10003 v_0 := v.Args[0] 10004 if v_0.Op != OpAMD64ADDQconst { 10005 break 10006 } 10007 d := v_0.AuxInt 10008 ptr := v_0.Args[0] 10009 idx := v.Args[1] 10010 val := v.Args[2] 10011 mem := v.Args[3] 10012 
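// Note (illustrative): with scale 1 the address ptr + idx + c is symmetric
// in ptr and idx, so an ADDQconst folds in from either operand; this rule
// and the next one both land on the same (MOVSDstoreidx1 [c+d] ...) form.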
if !(is32Bit(c + d)) { 10013 break 10014 } 10015 v.reset(OpAMD64MOVSDstoreidx1) 10016 v.AuxInt = c + d 10017 v.Aux = sym 10018 v.AddArg(ptr) 10019 v.AddArg(idx) 10020 v.AddArg(val) 10021 v.AddArg(mem) 10022 return true 10023 } 10024 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10025 // cond: is32Bit(c+d) 10026 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 10027 for { 10028 c := v.AuxInt 10029 sym := v.Aux 10030 _ = v.Args[3] 10031 ptr := v.Args[0] 10032 v_1 := v.Args[1] 10033 if v_1.Op != OpAMD64ADDQconst { 10034 break 10035 } 10036 d := v_1.AuxInt 10037 idx := v_1.Args[0] 10038 val := v.Args[2] 10039 mem := v.Args[3] 10040 if !(is32Bit(c + d)) { 10041 break 10042 } 10043 v.reset(OpAMD64MOVSDstoreidx1) 10044 v.AuxInt = c + d 10045 v.Aux = sym 10046 v.AddArg(ptr) 10047 v.AddArg(idx) 10048 v.AddArg(val) 10049 v.AddArg(mem) 10050 return true 10051 } 10052 return false 10053 } 10054 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { 10055 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10056 // cond: is32Bit(c+d) 10057 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 10058 for { 10059 c := v.AuxInt 10060 sym := v.Aux 10061 _ = v.Args[3] 10062 v_0 := v.Args[0] 10063 if v_0.Op != OpAMD64ADDQconst { 10064 break 10065 } 10066 d := v_0.AuxInt 10067 ptr := v_0.Args[0] 10068 idx := v.Args[1] 10069 val := v.Args[2] 10070 mem := v.Args[3] 10071 if !(is32Bit(c + d)) { 10072 break 10073 } 10074 v.reset(OpAMD64MOVSDstoreidx8) 10075 v.AuxInt = c + d 10076 v.Aux = sym 10077 v.AddArg(ptr) 10078 v.AddArg(idx) 10079 v.AddArg(val) 10080 v.AddArg(mem) 10081 return true 10082 } 10083 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10084 // cond: is32Bit(c+8*d) 10085 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 10086 for { 10087 c := v.AuxInt 10088 sym := v.Aux 10089 _ = v.Args[3] 10090 ptr := v.Args[0] 10091 v_1 := v.Args[1] 10092 if v_1.Op != OpAMD64ADDQconst { 10093 break 10094 } 10095 d := v_1.AuxInt 10096 idx := v_1.Args[0] 10097 val := v.Args[2] 10098 mem := v.Args[3] 10099 if !(is32Bit(c + 8*d)) { 10100 break 10101 } 10102 v.reset(OpAMD64MOVSDstoreidx8) 10103 v.AuxInt = c + 8*d 10104 v.Aux = sym 10105 v.AddArg(ptr) 10106 v.AddArg(idx) 10107 v.AddArg(val) 10108 v.AddArg(mem) 10109 return true 10110 } 10111 return false 10112 } 10113 func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { 10114 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 10115 // cond: is32Bit(off1+off2) 10116 // result: (MOVSSload [off1+off2] {sym} ptr mem) 10117 for { 10118 off1 := v.AuxInt 10119 sym := v.Aux 10120 _ = v.Args[1] 10121 v_0 := v.Args[0] 10122 if v_0.Op != OpAMD64ADDQconst { 10123 break 10124 } 10125 off2 := v_0.AuxInt 10126 ptr := v_0.Args[0] 10127 mem := v.Args[1] 10128 if !(is32Bit(off1 + off2)) { 10129 break 10130 } 10131 v.reset(OpAMD64MOVSSload) 10132 v.AuxInt = off1 + off2 10133 v.Aux = sym 10134 v.AddArg(ptr) 10135 v.AddArg(mem) 10136 return true 10137 } 10138 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10139 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10140 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10141 for { 10142 off1 := v.AuxInt 10143 sym1 := v.Aux 10144 _ = v.Args[1] 10145 v_0 := v.Args[0] 10146 if v_0.Op != OpAMD64LEAQ { 10147 break 10148 } 10149 off2 := v_0.AuxInt 10150 sym2 := v_0.Aux 10151 base := v_0.Args[0] 10152 mem := v.Args[1] 10153 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10154 break 10155 } 10156 
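// Editor's note (not generated): canMergeSym holds only when at most one of
// sym1/sym2 is non-nil, so the mergeSym below simply keeps whichever symbol
// exists while the two displacements are added, and the LEAQ is folded away
// entirely.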
v.reset(OpAMD64MOVSSload) 10157 v.AuxInt = off1 + off2 10158 v.Aux = mergeSym(sym1, sym2) 10159 v.AddArg(base) 10160 v.AddArg(mem) 10161 return true 10162 } 10163 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 10164 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10165 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10166 for { 10167 off1 := v.AuxInt 10168 sym1 := v.Aux 10169 _ = v.Args[1] 10170 v_0 := v.Args[0] 10171 if v_0.Op != OpAMD64LEAQ1 { 10172 break 10173 } 10174 off2 := v_0.AuxInt 10175 sym2 := v_0.Aux 10176 _ = v_0.Args[1] 10177 ptr := v_0.Args[0] 10178 idx := v_0.Args[1] 10179 mem := v.Args[1] 10180 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10181 break 10182 } 10183 v.reset(OpAMD64MOVSSloadidx1) 10184 v.AuxInt = off1 + off2 10185 v.Aux = mergeSym(sym1, sym2) 10186 v.AddArg(ptr) 10187 v.AddArg(idx) 10188 v.AddArg(mem) 10189 return true 10190 } 10191 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 10192 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10193 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10194 for { 10195 off1 := v.AuxInt 10196 sym1 := v.Aux 10197 _ = v.Args[1] 10198 v_0 := v.Args[0] 10199 if v_0.Op != OpAMD64LEAQ4 { 10200 break 10201 } 10202 off2 := v_0.AuxInt 10203 sym2 := v_0.Aux 10204 _ = v_0.Args[1] 10205 ptr := v_0.Args[0] 10206 idx := v_0.Args[1] 10207 mem := v.Args[1] 10208 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10209 break 10210 } 10211 v.reset(OpAMD64MOVSSloadidx4) 10212 v.AuxInt = off1 + off2 10213 v.Aux = mergeSym(sym1, sym2) 10214 v.AddArg(ptr) 10215 v.AddArg(idx) 10216 v.AddArg(mem) 10217 return true 10218 } 10219 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 10220 // cond: ptr.Op != OpSB 10221 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 10222 for { 10223 off := v.AuxInt 10224 sym := v.Aux 10225 _ = v.Args[1] 10226 v_0 := v.Args[0] 10227 if v_0.Op != OpAMD64ADDQ { 10228 break 10229 } 10230 _ = v_0.Args[1] 10231 ptr := v_0.Args[0] 10232 idx := v_0.Args[1] 10233 mem := v.Args[1] 10234 if !(ptr.Op != OpSB) { 10235 break 10236 } 10237 v.reset(OpAMD64MOVSSloadidx1) 10238 v.AuxInt = off 10239 v.Aux = sym 10240 v.AddArg(ptr) 10241 v.AddArg(idx) 10242 v.AddArg(mem) 10243 return true 10244 } 10245 return false 10246 } 10247 func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { 10248 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 10249 // cond: 10250 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 10251 for { 10252 c := v.AuxInt 10253 sym := v.Aux 10254 _ = v.Args[2] 10255 ptr := v.Args[0] 10256 v_1 := v.Args[1] 10257 if v_1.Op != OpAMD64SHLQconst { 10258 break 10259 } 10260 if v_1.AuxInt != 2 { 10261 break 10262 } 10263 idx := v_1.Args[0] 10264 mem := v.Args[2] 10265 v.reset(OpAMD64MOVSSloadidx4) 10266 v.AuxInt = c 10267 v.Aux = sym 10268 v.AddArg(ptr) 10269 v.AddArg(idx) 10270 v.AddArg(mem) 10271 return true 10272 } 10273 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 10274 // cond: is32Bit(c+d) 10275 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 10276 for { 10277 c := v.AuxInt 10278 sym := v.Aux 10279 _ = v.Args[2] 10280 v_0 := v.Args[0] 10281 if v_0.Op != OpAMD64ADDQconst { 10282 break 10283 } 10284 d := v_0.AuxInt 10285 ptr := v_0.Args[0] 10286 idx := v.Args[1] 10287 mem := v.Args[2] 10288 if !(is32Bit(c + d)) { 10289 break 10290 } 10291 v.reset(OpAMD64MOVSSloadidx1) 10292 v.AuxInt = c + d 10293 v.Aux = sym 10294 v.AddArg(ptr) 10295 v.AddArg(idx) 10296 
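// Editor's note (not generated): idx1 addressing computes ptr + idx + c at
// scale 1, where ptr and idx are interchangeable in the address arithmetic;
// that is why this rule has a mirror-image twin just below that matches the
// ADDQconst in the index slot instead of the pointer slot.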
v.AddArg(mem) 10297 return true 10298 } 10299 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 10300 // cond: is32Bit(c+d) 10301 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 10302 for { 10303 c := v.AuxInt 10304 sym := v.Aux 10305 _ = v.Args[2] 10306 ptr := v.Args[0] 10307 v_1 := v.Args[1] 10308 if v_1.Op != OpAMD64ADDQconst { 10309 break 10310 } 10311 d := v_1.AuxInt 10312 idx := v_1.Args[0] 10313 mem := v.Args[2] 10314 if !(is32Bit(c + d)) { 10315 break 10316 } 10317 v.reset(OpAMD64MOVSSloadidx1) 10318 v.AuxInt = c + d 10319 v.Aux = sym 10320 v.AddArg(ptr) 10321 v.AddArg(idx) 10322 v.AddArg(mem) 10323 return true 10324 } 10325 return false 10326 } 10327 func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { 10328 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 10329 // cond: is32Bit(c+d) 10330 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 10331 for { 10332 c := v.AuxInt 10333 sym := v.Aux 10334 _ = v.Args[2] 10335 v_0 := v.Args[0] 10336 if v_0.Op != OpAMD64ADDQconst { 10337 break 10338 } 10339 d := v_0.AuxInt 10340 ptr := v_0.Args[0] 10341 idx := v.Args[1] 10342 mem := v.Args[2] 10343 if !(is32Bit(c + d)) { 10344 break 10345 } 10346 v.reset(OpAMD64MOVSSloadidx4) 10347 v.AuxInt = c + d 10348 v.Aux = sym 10349 v.AddArg(ptr) 10350 v.AddArg(idx) 10351 v.AddArg(mem) 10352 return true 10353 } 10354 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 10355 // cond: is32Bit(c+4*d) 10356 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 10357 for { 10358 c := v.AuxInt 10359 sym := v.Aux 10360 _ = v.Args[2] 10361 ptr := v.Args[0] 10362 v_1 := v.Args[1] 10363 if v_1.Op != OpAMD64ADDQconst { 10364 break 10365 } 10366 d := v_1.AuxInt 10367 idx := v_1.Args[0] 10368 mem := v.Args[2] 10369 if !(is32Bit(c + 4*d)) { 10370 break 10371 } 10372 v.reset(OpAMD64MOVSSloadidx4) 10373 v.AuxInt = c + 4*d 10374 v.Aux = sym 10375 v.AddArg(ptr) 10376 v.AddArg(idx) 10377 v.AddArg(mem) 10378 return true 10379 } 10380 return false 10381 } 10382 func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { 10383 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 10384 // cond: is32Bit(off1+off2) 10385 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 10386 for { 10387 off1 := v.AuxInt 10388 sym := v.Aux 10389 _ = v.Args[2] 10390 v_0 := v.Args[0] 10391 if v_0.Op != OpAMD64ADDQconst { 10392 break 10393 } 10394 off2 := v_0.AuxInt 10395 ptr := v_0.Args[0] 10396 val := v.Args[1] 10397 mem := v.Args[2] 10398 if !(is32Bit(off1 + off2)) { 10399 break 10400 } 10401 v.reset(OpAMD64MOVSSstore) 10402 v.AuxInt = off1 + off2 10403 v.Aux = sym 10404 v.AddArg(ptr) 10405 v.AddArg(val) 10406 v.AddArg(mem) 10407 return true 10408 } 10409 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 10410 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10411 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10412 for { 10413 off1 := v.AuxInt 10414 sym1 := v.Aux 10415 _ = v.Args[2] 10416 v_0 := v.Args[0] 10417 if v_0.Op != OpAMD64LEAQ { 10418 break 10419 } 10420 off2 := v_0.AuxInt 10421 sym2 := v_0.Aux 10422 base := v_0.Args[0] 10423 val := v.Args[1] 10424 mem := v.Args[2] 10425 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10426 break 10427 } 10428 v.reset(OpAMD64MOVSSstore) 10429 v.AuxInt = off1 + off2 10430 v.Aux = mergeSym(sym1, sym2) 10431 v.AddArg(base) 10432 v.AddArg(val) 10433 v.AddArg(mem) 10434 return true 10435 } 10436 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 10437 // 
cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10438 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10439 for { 10440 off1 := v.AuxInt 10441 sym1 := v.Aux 10442 _ = v.Args[2] 10443 v_0 := v.Args[0] 10444 if v_0.Op != OpAMD64LEAQ1 { 10445 break 10446 } 10447 off2 := v_0.AuxInt 10448 sym2 := v_0.Aux 10449 _ = v_0.Args[1] 10450 ptr := v_0.Args[0] 10451 idx := v_0.Args[1] 10452 val := v.Args[1] 10453 mem := v.Args[2] 10454 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10455 break 10456 } 10457 v.reset(OpAMD64MOVSSstoreidx1) 10458 v.AuxInt = off1 + off2 10459 v.Aux = mergeSym(sym1, sym2) 10460 v.AddArg(ptr) 10461 v.AddArg(idx) 10462 v.AddArg(val) 10463 v.AddArg(mem) 10464 return true 10465 } 10466 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 10467 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10468 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10469 for { 10470 off1 := v.AuxInt 10471 sym1 := v.Aux 10472 _ = v.Args[2] 10473 v_0 := v.Args[0] 10474 if v_0.Op != OpAMD64LEAQ4 { 10475 break 10476 } 10477 off2 := v_0.AuxInt 10478 sym2 := v_0.Aux 10479 _ = v_0.Args[1] 10480 ptr := v_0.Args[0] 10481 idx := v_0.Args[1] 10482 val := v.Args[1] 10483 mem := v.Args[2] 10484 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10485 break 10486 } 10487 v.reset(OpAMD64MOVSSstoreidx4) 10488 v.AuxInt = off1 + off2 10489 v.Aux = mergeSym(sym1, sym2) 10490 v.AddArg(ptr) 10491 v.AddArg(idx) 10492 v.AddArg(val) 10493 v.AddArg(mem) 10494 return true 10495 } 10496 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 10497 // cond: ptr.Op != OpSB 10498 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 10499 for { 10500 off := v.AuxInt 10501 sym := v.Aux 10502 _ = v.Args[2] 10503 v_0 := v.Args[0] 10504 if v_0.Op != OpAMD64ADDQ { 10505 break 10506 } 10507 _ = v_0.Args[1] 10508 ptr := v_0.Args[0] 10509 idx := v_0.Args[1] 10510 val := v.Args[1] 10511 mem := v.Args[2] 10512 if !(ptr.Op != OpSB) { 10513 break 10514 } 10515 v.reset(OpAMD64MOVSSstoreidx1) 10516 v.AuxInt = off 10517 v.Aux = sym 10518 v.AddArg(ptr) 10519 v.AddArg(idx) 10520 v.AddArg(val) 10521 v.AddArg(mem) 10522 return true 10523 } 10524 return false 10525 } 10526 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { 10527 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 10528 // cond: 10529 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 10530 for { 10531 c := v.AuxInt 10532 sym := v.Aux 10533 _ = v.Args[3] 10534 ptr := v.Args[0] 10535 v_1 := v.Args[1] 10536 if v_1.Op != OpAMD64SHLQconst { 10537 break 10538 } 10539 if v_1.AuxInt != 2 { 10540 break 10541 } 10542 idx := v_1.Args[0] 10543 val := v.Args[2] 10544 mem := v.Args[3] 10545 v.reset(OpAMD64MOVSSstoreidx4) 10546 v.AuxInt = c 10547 v.Aux = sym 10548 v.AddArg(ptr) 10549 v.AddArg(idx) 10550 v.AddArg(val) 10551 v.AddArg(mem) 10552 return true 10553 } 10554 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10555 // cond: is32Bit(c+d) 10556 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 10557 for { 10558 c := v.AuxInt 10559 sym := v.Aux 10560 _ = v.Args[3] 10561 v_0 := v.Args[0] 10562 if v_0.Op != OpAMD64ADDQconst { 10563 break 10564 } 10565 d := v_0.AuxInt 10566 ptr := v_0.Args[0] 10567 idx := v.Args[1] 10568 val := v.Args[2] 10569 mem := v.Args[3] 10570 if !(is32Bit(c + d)) { 10571 break 10572 } 10573 v.reset(OpAMD64MOVSSstoreidx1) 10574 v.AuxInt = c + d 10575 v.Aux = sym 10576 v.AddArg(ptr) 10577 v.AddArg(idx) 10578 
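// Editor's note (not generated): the store forms carry four operands in the
// fixed order (ptr, idx, val, mem); only the displacement changed above, so
// the remaining arguments are re-attached unchanged.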
v.AddArg(val) 10579 v.AddArg(mem) 10580 return true 10581 } 10582 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10583 // cond: is32Bit(c+d) 10584 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 10585 for { 10586 c := v.AuxInt 10587 sym := v.Aux 10588 _ = v.Args[3] 10589 ptr := v.Args[0] 10590 v_1 := v.Args[1] 10591 if v_1.Op != OpAMD64ADDQconst { 10592 break 10593 } 10594 d := v_1.AuxInt 10595 idx := v_1.Args[0] 10596 val := v.Args[2] 10597 mem := v.Args[3] 10598 if !(is32Bit(c + d)) { 10599 break 10600 } 10601 v.reset(OpAMD64MOVSSstoreidx1) 10602 v.AuxInt = c + d 10603 v.Aux = sym 10604 v.AddArg(ptr) 10605 v.AddArg(idx) 10606 v.AddArg(val) 10607 v.AddArg(mem) 10608 return true 10609 } 10610 return false 10611 } 10612 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { 10613 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10614 // cond: is32Bit(c+d) 10615 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 10616 for { 10617 c := v.AuxInt 10618 sym := v.Aux 10619 _ = v.Args[3] 10620 v_0 := v.Args[0] 10621 if v_0.Op != OpAMD64ADDQconst { 10622 break 10623 } 10624 d := v_0.AuxInt 10625 ptr := v_0.Args[0] 10626 idx := v.Args[1] 10627 val := v.Args[2] 10628 mem := v.Args[3] 10629 if !(is32Bit(c + d)) { 10630 break 10631 } 10632 v.reset(OpAMD64MOVSSstoreidx4) 10633 v.AuxInt = c + d 10634 v.Aux = sym 10635 v.AddArg(ptr) 10636 v.AddArg(idx) 10637 v.AddArg(val) 10638 v.AddArg(mem) 10639 return true 10640 } 10641 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10642 // cond: is32Bit(c+4*d) 10643 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 10644 for { 10645 c := v.AuxInt 10646 sym := v.Aux 10647 _ = v.Args[3] 10648 ptr := v.Args[0] 10649 v_1 := v.Args[1] 10650 if v_1.Op != OpAMD64ADDQconst { 10651 break 10652 } 10653 d := v_1.AuxInt 10654 idx := v_1.Args[0] 10655 val := v.Args[2] 10656 mem := v.Args[3] 10657 if !(is32Bit(c + 4*d)) { 10658 break 10659 } 10660 v.reset(OpAMD64MOVSSstoreidx4) 10661 v.AuxInt = c + 4*d 10662 v.Aux = sym 10663 v.AddArg(ptr) 10664 v.AddArg(idx) 10665 v.AddArg(val) 10666 v.AddArg(mem) 10667 return true 10668 } 10669 return false 10670 } 10671 func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { 10672 b := v.Block 10673 _ = b 10674 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 10675 // cond: x.Uses == 1 && clobber(x) 10676 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 10677 for { 10678 x := v.Args[0] 10679 if x.Op != OpAMD64MOVWload { 10680 break 10681 } 10682 off := x.AuxInt 10683 sym := x.Aux 10684 _ = x.Args[1] 10685 ptr := x.Args[0] 10686 mem := x.Args[1] 10687 if !(x.Uses == 1 && clobber(x)) { 10688 break 10689 } 10690 b = x.Block 10691 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 10692 v.reset(OpCopy) 10693 v.AddArg(v0) 10694 v0.AuxInt = off 10695 v0.Aux = sym 10696 v0.AddArg(ptr) 10697 v0.AddArg(mem) 10698 return true 10699 } 10700 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 10701 // cond: x.Uses == 1 && clobber(x) 10702 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 10703 for { 10704 x := v.Args[0] 10705 if x.Op != OpAMD64MOVLload { 10706 break 10707 } 10708 off := x.AuxInt 10709 sym := x.Aux 10710 _ = x.Args[1] 10711 ptr := x.Args[0] 10712 mem := x.Args[1] 10713 if !(x.Uses == 1 && clobber(x)) { 10714 break 10715 } 10716 b = x.Block 10717 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 10718 v.reset(OpCopy) 10719 v.AddArg(v0) 10720 v0.AuxInt = off 10721 v0.Aux = sym 10722 v0.AddArg(ptr) 10723 
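// Editor's note (not generated): this is the cross-block replacement
// pattern. The combined sign-extending load is materialized in the block of
// the original load (b = x.Block above), and v is reduced to a Copy of it;
// the x.Uses == 1 && clobber(x) guard ensures the wide load had no other
// consumers before it is discarded.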
v0.AddArg(mem) 10724 return true 10725 } 10726 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 10727 // cond: x.Uses == 1 && clobber(x) 10728 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 10729 for { 10730 x := v.Args[0] 10731 if x.Op != OpAMD64MOVQload { 10732 break 10733 } 10734 off := x.AuxInt 10735 sym := x.Aux 10736 _ = x.Args[1] 10737 ptr := x.Args[0] 10738 mem := x.Args[1] 10739 if !(x.Uses == 1 && clobber(x)) { 10740 break 10741 } 10742 b = x.Block 10743 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 10744 v.reset(OpCopy) 10745 v.AddArg(v0) 10746 v0.AuxInt = off 10747 v0.Aux = sym 10748 v0.AddArg(ptr) 10749 v0.AddArg(mem) 10750 return true 10751 } 10752 // match: (MOVWQSX (ANDLconst [c] x)) 10753 // cond: c & 0x8000 == 0 10754 // result: (ANDLconst [c & 0x7fff] x) 10755 for { 10756 v_0 := v.Args[0] 10757 if v_0.Op != OpAMD64ANDLconst { 10758 break 10759 } 10760 c := v_0.AuxInt 10761 x := v_0.Args[0] 10762 if !(c&0x8000 == 0) { 10763 break 10764 } 10765 v.reset(OpAMD64ANDLconst) 10766 v.AuxInt = c & 0x7fff 10767 v.AddArg(x) 10768 return true 10769 } 10770 // match: (MOVWQSX (MOVWQSX x)) 10771 // cond: 10772 // result: (MOVWQSX x) 10773 for { 10774 v_0 := v.Args[0] 10775 if v_0.Op != OpAMD64MOVWQSX { 10776 break 10777 } 10778 x := v_0.Args[0] 10779 v.reset(OpAMD64MOVWQSX) 10780 v.AddArg(x) 10781 return true 10782 } 10783 // match: (MOVWQSX (MOVBQSX x)) 10784 // cond: 10785 // result: (MOVBQSX x) 10786 for { 10787 v_0 := v.Args[0] 10788 if v_0.Op != OpAMD64MOVBQSX { 10789 break 10790 } 10791 x := v_0.Args[0] 10792 v.reset(OpAMD64MOVBQSX) 10793 v.AddArg(x) 10794 return true 10795 } 10796 return false 10797 } 10798 func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { 10799 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 10800 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 10801 // result: (MOVWQSX x) 10802 for { 10803 off := v.AuxInt 10804 sym := v.Aux 10805 _ = v.Args[1] 10806 ptr := v.Args[0] 10807 v_1 := v.Args[1] 10808 if v_1.Op != OpAMD64MOVWstore { 10809 break 10810 } 10811 off2 := v_1.AuxInt 10812 sym2 := v_1.Aux 10813 _ = v_1.Args[2] 10814 ptr2 := v_1.Args[0] 10815 x := v_1.Args[1] 10816 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 10817 break 10818 } 10819 v.reset(OpAMD64MOVWQSX) 10820 v.AddArg(x) 10821 return true 10822 } 10823 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10824 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10825 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10826 for { 10827 off1 := v.AuxInt 10828 sym1 := v.Aux 10829 _ = v.Args[1] 10830 v_0 := v.Args[0] 10831 if v_0.Op != OpAMD64LEAQ { 10832 break 10833 } 10834 off2 := v_0.AuxInt 10835 sym2 := v_0.Aux 10836 base := v_0.Args[0] 10837 mem := v.Args[1] 10838 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10839 break 10840 } 10841 v.reset(OpAMD64MOVWQSXload) 10842 v.AuxInt = off1 + off2 10843 v.Aux = mergeSym(sym1, sym2) 10844 v.AddArg(base) 10845 v.AddArg(mem) 10846 return true 10847 } 10848 return false 10849 } 10850 func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { 10851 b := v.Block 10852 _ = b 10853 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 10854 // cond: x.Uses == 1 && clobber(x) 10855 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10856 for { 10857 x := v.Args[0] 10858 if x.Op != OpAMD64MOVWload { 10859 break 10860 } 10861 off := x.AuxInt 10862 sym := x.Aux 10863 _ = x.Args[1] 10864 ptr := x.Args[0] 10865 mem := x.Args[1] 10866 
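// Editor's note (not generated): MOVWload already zero-extends the 16-bit
// value into the full register on amd64, so an explicit MOVWQZX wrapped
// around a single-use load is redundant; the load is simply rebuilt with
// the wider result type.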
if !(x.Uses == 1 && clobber(x)) { 10867 break 10868 } 10869 b = x.Block 10870 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10871 v.reset(OpCopy) 10872 v.AddArg(v0) 10873 v0.AuxInt = off 10874 v0.Aux = sym 10875 v0.AddArg(ptr) 10876 v0.AddArg(mem) 10877 return true 10878 } 10879 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 10880 // cond: x.Uses == 1 && clobber(x) 10881 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10882 for { 10883 x := v.Args[0] 10884 if x.Op != OpAMD64MOVLload { 10885 break 10886 } 10887 off := x.AuxInt 10888 sym := x.Aux 10889 _ = x.Args[1] 10890 ptr := x.Args[0] 10891 mem := x.Args[1] 10892 if !(x.Uses == 1 && clobber(x)) { 10893 break 10894 } 10895 b = x.Block 10896 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10897 v.reset(OpCopy) 10898 v.AddArg(v0) 10899 v0.AuxInt = off 10900 v0.Aux = sym 10901 v0.AddArg(ptr) 10902 v0.AddArg(mem) 10903 return true 10904 } 10905 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 10906 // cond: x.Uses == 1 && clobber(x) 10907 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10908 for { 10909 x := v.Args[0] 10910 if x.Op != OpAMD64MOVQload { 10911 break 10912 } 10913 off := x.AuxInt 10914 sym := x.Aux 10915 _ = x.Args[1] 10916 ptr := x.Args[0] 10917 mem := x.Args[1] 10918 if !(x.Uses == 1 && clobber(x)) { 10919 break 10920 } 10921 b = x.Block 10922 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10923 v.reset(OpCopy) 10924 v.AddArg(v0) 10925 v0.AuxInt = off 10926 v0.Aux = sym 10927 v0.AddArg(ptr) 10928 v0.AddArg(mem) 10929 return true 10930 } 10931 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 10932 // cond: x.Uses == 1 && clobber(x) 10933 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 10934 for { 10935 x := v.Args[0] 10936 if x.Op != OpAMD64MOVWloadidx1 { 10937 break 10938 } 10939 off := x.AuxInt 10940 sym := x.Aux 10941 _ = x.Args[2] 10942 ptr := x.Args[0] 10943 idx := x.Args[1] 10944 mem := x.Args[2] 10945 if !(x.Uses == 1 && clobber(x)) { 10946 break 10947 } 10948 b = x.Block 10949 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 10950 v.reset(OpCopy) 10951 v.AddArg(v0) 10952 v0.AuxInt = off 10953 v0.Aux = sym 10954 v0.AddArg(ptr) 10955 v0.AddArg(idx) 10956 v0.AddArg(mem) 10957 return true 10958 } 10959 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 10960 // cond: x.Uses == 1 && clobber(x) 10961 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 10962 for { 10963 x := v.Args[0] 10964 if x.Op != OpAMD64MOVWloadidx2 { 10965 break 10966 } 10967 off := x.AuxInt 10968 sym := x.Aux 10969 _ = x.Args[2] 10970 ptr := x.Args[0] 10971 idx := x.Args[1] 10972 mem := x.Args[2] 10973 if !(x.Uses == 1 && clobber(x)) { 10974 break 10975 } 10976 b = x.Block 10977 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 10978 v.reset(OpCopy) 10979 v.AddArg(v0) 10980 v0.AuxInt = off 10981 v0.Aux = sym 10982 v0.AddArg(ptr) 10983 v0.AddArg(idx) 10984 v0.AddArg(mem) 10985 return true 10986 } 10987 // match: (MOVWQZX (ANDLconst [c] x)) 10988 // cond: 10989 // result: (ANDLconst [c & 0xffff] x) 10990 for { 10991 v_0 := v.Args[0] 10992 if v_0.Op != OpAMD64ANDLconst { 10993 break 10994 } 10995 c := v_0.AuxInt 10996 x := v_0.Args[0] 10997 v.reset(OpAMD64ANDLconst) 10998 v.AuxInt = c & 0xffff 10999 v.AddArg(x) 11000 return true 11001 } 11002 // match: (MOVWQZX (MOVWQZX x)) 11003 // cond: 11004 // result: (MOVWQZX x) 11005 for { 11006 v_0 := v.Args[0] 11007 if v_0.Op != OpAMD64MOVWQZX { 11008 break 11009 } 11010 x := v_0.Args[0] 11011 
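// Editor's note (not generated): zero-extension is idempotent, so the inner
// MOVWQZX already guarantees the upper 48 bits are clear and the nested
// pair collapses to one op. The MOVBQZX case just below is the same idea:
// the byte extension is strictly narrower and therefore subsumes the word
// extension.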
v.reset(OpAMD64MOVWQZX) 11012 v.AddArg(x) 11013 return true 11014 } 11015 // match: (MOVWQZX (MOVBQZX x)) 11016 // cond: 11017 // result: (MOVBQZX x) 11018 for { 11019 v_0 := v.Args[0] 11020 if v_0.Op != OpAMD64MOVBQZX { 11021 break 11022 } 11023 x := v_0.Args[0] 11024 v.reset(OpAMD64MOVBQZX) 11025 v.AddArg(x) 11026 return true 11027 } 11028 return false 11029 } 11030 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 11031 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 11032 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 11033 // result: (MOVWQZX x) 11034 for { 11035 off := v.AuxInt 11036 sym := v.Aux 11037 _ = v.Args[1] 11038 ptr := v.Args[0] 11039 v_1 := v.Args[1] 11040 if v_1.Op != OpAMD64MOVWstore { 11041 break 11042 } 11043 off2 := v_1.AuxInt 11044 sym2 := v_1.Aux 11045 _ = v_1.Args[2] 11046 ptr2 := v_1.Args[0] 11047 x := v_1.Args[1] 11048 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 11049 break 11050 } 11051 v.reset(OpAMD64MOVWQZX) 11052 v.AddArg(x) 11053 return true 11054 } 11055 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 11056 // cond: is32Bit(off1+off2) 11057 // result: (MOVWload [off1+off2] {sym} ptr mem) 11058 for { 11059 off1 := v.AuxInt 11060 sym := v.Aux 11061 _ = v.Args[1] 11062 v_0 := v.Args[0] 11063 if v_0.Op != OpAMD64ADDQconst { 11064 break 11065 } 11066 off2 := v_0.AuxInt 11067 ptr := v_0.Args[0] 11068 mem := v.Args[1] 11069 if !(is32Bit(off1 + off2)) { 11070 break 11071 } 11072 v.reset(OpAMD64MOVWload) 11073 v.AuxInt = off1 + off2 11074 v.Aux = sym 11075 v.AddArg(ptr) 11076 v.AddArg(mem) 11077 return true 11078 } 11079 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 11080 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11081 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 11082 for { 11083 off1 := v.AuxInt 11084 sym1 := v.Aux 11085 _ = v.Args[1] 11086 v_0 := v.Args[0] 11087 if v_0.Op != OpAMD64LEAQ { 11088 break 11089 } 11090 off2 := v_0.AuxInt 11091 sym2 := v_0.Aux 11092 base := v_0.Args[0] 11093 mem := v.Args[1] 11094 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11095 break 11096 } 11097 v.reset(OpAMD64MOVWload) 11098 v.AuxInt = off1 + off2 11099 v.Aux = mergeSym(sym1, sym2) 11100 v.AddArg(base) 11101 v.AddArg(mem) 11102 return true 11103 } 11104 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 11105 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11106 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 11107 for { 11108 off1 := v.AuxInt 11109 sym1 := v.Aux 11110 _ = v.Args[1] 11111 v_0 := v.Args[0] 11112 if v_0.Op != OpAMD64LEAQ1 { 11113 break 11114 } 11115 off2 := v_0.AuxInt 11116 sym2 := v_0.Aux 11117 _ = v_0.Args[1] 11118 ptr := v_0.Args[0] 11119 idx := v_0.Args[1] 11120 mem := v.Args[1] 11121 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11122 break 11123 } 11124 v.reset(OpAMD64MOVWloadidx1) 11125 v.AuxInt = off1 + off2 11126 v.Aux = mergeSym(sym1, sym2) 11127 v.AddArg(ptr) 11128 v.AddArg(idx) 11129 v.AddArg(mem) 11130 return true 11131 } 11132 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 11133 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11134 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 11135 for { 11136 off1 := v.AuxInt 11137 sym1 := v.Aux 11138 _ = v.Args[1] 11139 v_0 := v.Args[0] 11140 if v_0.Op != OpAMD64LEAQ2 { 11141 break 11142 } 11143 off2 := v_0.AuxInt 11144 sym2 := v_0.Aux 11145 _ = v_0.Args[1] 11146 ptr := 
v_0.Args[0] 11147 idx := v_0.Args[1] 11148 mem := v.Args[1] 11149 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11150 break 11151 } 11152 v.reset(OpAMD64MOVWloadidx2) 11153 v.AuxInt = off1 + off2 11154 v.Aux = mergeSym(sym1, sym2) 11155 v.AddArg(ptr) 11156 v.AddArg(idx) 11157 v.AddArg(mem) 11158 return true 11159 } 11160 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 11161 // cond: ptr.Op != OpSB 11162 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 11163 for { 11164 off := v.AuxInt 11165 sym := v.Aux 11166 _ = v.Args[1] 11167 v_0 := v.Args[0] 11168 if v_0.Op != OpAMD64ADDQ { 11169 break 11170 } 11171 _ = v_0.Args[1] 11172 ptr := v_0.Args[0] 11173 idx := v_0.Args[1] 11174 mem := v.Args[1] 11175 if !(ptr.Op != OpSB) { 11176 break 11177 } 11178 v.reset(OpAMD64MOVWloadidx1) 11179 v.AuxInt = off 11180 v.Aux = sym 11181 v.AddArg(ptr) 11182 v.AddArg(idx) 11183 v.AddArg(mem) 11184 return true 11185 } 11186 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 11187 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 11188 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 11189 for { 11190 off1 := v.AuxInt 11191 sym1 := v.Aux 11192 _ = v.Args[1] 11193 v_0 := v.Args[0] 11194 if v_0.Op != OpAMD64LEAL { 11195 break 11196 } 11197 off2 := v_0.AuxInt 11198 sym2 := v_0.Aux 11199 base := v_0.Args[0] 11200 mem := v.Args[1] 11201 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 11202 break 11203 } 11204 v.reset(OpAMD64MOVWload) 11205 v.AuxInt = off1 + off2 11206 v.Aux = mergeSym(sym1, sym2) 11207 v.AddArg(base) 11208 v.AddArg(mem) 11209 return true 11210 } 11211 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 11212 // cond: is32Bit(off1+off2) 11213 // result: (MOVWload [off1+off2] {sym} ptr mem) 11214 for { 11215 off1 := v.AuxInt 11216 sym := v.Aux 11217 _ = v.Args[1] 11218 v_0 := v.Args[0] 11219 if v_0.Op != OpAMD64ADDLconst { 11220 break 11221 } 11222 off2 := v_0.AuxInt 11223 ptr := v_0.Args[0] 11224 mem := v.Args[1] 11225 if !(is32Bit(off1 + off2)) { 11226 break 11227 } 11228 v.reset(OpAMD64MOVWload) 11229 v.AuxInt = off1 + off2 11230 v.Aux = sym 11231 v.AddArg(ptr) 11232 v.AddArg(mem) 11233 return true 11234 } 11235 return false 11236 } 11237 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 11238 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 11239 // cond: 11240 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 11241 for { 11242 c := v.AuxInt 11243 sym := v.Aux 11244 _ = v.Args[2] 11245 ptr := v.Args[0] 11246 v_1 := v.Args[1] 11247 if v_1.Op != OpAMD64SHLQconst { 11248 break 11249 } 11250 if v_1.AuxInt != 1 { 11251 break 11252 } 11253 idx := v_1.Args[0] 11254 mem := v.Args[2] 11255 v.reset(OpAMD64MOVWloadidx2) 11256 v.AuxInt = c 11257 v.Aux = sym 11258 v.AddArg(ptr) 11259 v.AddArg(idx) 11260 v.AddArg(mem) 11261 return true 11262 } 11263 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) 11264 // cond: 11265 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 11266 for { 11267 c := v.AuxInt 11268 sym := v.Aux 11269 _ = v.Args[2] 11270 v_0 := v.Args[0] 11271 if v_0.Op != OpAMD64SHLQconst { 11272 break 11273 } 11274 if v_0.AuxInt != 1 { 11275 break 11276 } 11277 idx := v_0.Args[0] 11278 ptr := v.Args[1] 11279 mem := v.Args[2] 11280 v.reset(OpAMD64MOVWloadidx2) 11281 v.AuxInt = c 11282 v.Aux = sym 11283 v.AddArg(ptr) 11284 v.AddArg(idx) 11285 v.AddArg(mem) 11286 return true 11287 } 11288 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 11289 // cond: is32Bit(c+d) 11290 // result: (MOVWloadidx1 
[c+d] {sym} ptr idx mem) 11291 for { 11292 c := v.AuxInt 11293 sym := v.Aux 11294 _ = v.Args[2] 11295 v_0 := v.Args[0] 11296 if v_0.Op != OpAMD64ADDQconst { 11297 break 11298 } 11299 d := v_0.AuxInt 11300 ptr := v_0.Args[0] 11301 idx := v.Args[1] 11302 mem := v.Args[2] 11303 if !(is32Bit(c + d)) { 11304 break 11305 } 11306 v.reset(OpAMD64MOVWloadidx1) 11307 v.AuxInt = c + d 11308 v.Aux = sym 11309 v.AddArg(ptr) 11310 v.AddArg(idx) 11311 v.AddArg(mem) 11312 return true 11313 } 11314 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 11315 // cond: is32Bit(c+d) 11316 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 11317 for { 11318 c := v.AuxInt 11319 sym := v.Aux 11320 _ = v.Args[2] 11321 idx := v.Args[0] 11322 v_1 := v.Args[1] 11323 if v_1.Op != OpAMD64ADDQconst { 11324 break 11325 } 11326 d := v_1.AuxInt 11327 ptr := v_1.Args[0] 11328 mem := v.Args[2] 11329 if !(is32Bit(c + d)) { 11330 break 11331 } 11332 v.reset(OpAMD64MOVWloadidx1) 11333 v.AuxInt = c + d 11334 v.Aux = sym 11335 v.AddArg(ptr) 11336 v.AddArg(idx) 11337 v.AddArg(mem) 11338 return true 11339 } 11340 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 11341 // cond: is32Bit(c+d) 11342 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 11343 for { 11344 c := v.AuxInt 11345 sym := v.Aux 11346 _ = v.Args[2] 11347 ptr := v.Args[0] 11348 v_1 := v.Args[1] 11349 if v_1.Op != OpAMD64ADDQconst { 11350 break 11351 } 11352 d := v_1.AuxInt 11353 idx := v_1.Args[0] 11354 mem := v.Args[2] 11355 if !(is32Bit(c + d)) { 11356 break 11357 } 11358 v.reset(OpAMD64MOVWloadidx1) 11359 v.AuxInt = c + d 11360 v.Aux = sym 11361 v.AddArg(ptr) 11362 v.AddArg(idx) 11363 v.AddArg(mem) 11364 return true 11365 } 11366 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 11367 // cond: is32Bit(c+d) 11368 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 11369 for { 11370 c := v.AuxInt 11371 sym := v.Aux 11372 _ = v.Args[2] 11373 v_0 := v.Args[0] 11374 if v_0.Op != OpAMD64ADDQconst { 11375 break 11376 } 11377 d := v_0.AuxInt 11378 idx := v_0.Args[0] 11379 ptr := v.Args[1] 11380 mem := v.Args[2] 11381 if !(is32Bit(c + d)) { 11382 break 11383 } 11384 v.reset(OpAMD64MOVWloadidx1) 11385 v.AuxInt = c + d 11386 v.Aux = sym 11387 v.AddArg(ptr) 11388 v.AddArg(idx) 11389 v.AddArg(mem) 11390 return true 11391 } 11392 return false 11393 } 11394 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 11395 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 11396 // cond: is32Bit(c+d) 11397 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 11398 for { 11399 c := v.AuxInt 11400 sym := v.Aux 11401 _ = v.Args[2] 11402 v_0 := v.Args[0] 11403 if v_0.Op != OpAMD64ADDQconst { 11404 break 11405 } 11406 d := v_0.AuxInt 11407 ptr := v_0.Args[0] 11408 idx := v.Args[1] 11409 mem := v.Args[2] 11410 if !(is32Bit(c + d)) { 11411 break 11412 } 11413 v.reset(OpAMD64MOVWloadidx2) 11414 v.AuxInt = c + d 11415 v.Aux = sym 11416 v.AddArg(ptr) 11417 v.AddArg(idx) 11418 v.AddArg(mem) 11419 return true 11420 } 11421 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 11422 // cond: is32Bit(c+2*d) 11423 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 11424 for { 11425 c := v.AuxInt 11426 sym := v.Aux 11427 _ = v.Args[2] 11428 ptr := v.Args[0] 11429 v_1 := v.Args[1] 11430 if v_1.Op != OpAMD64ADDQconst { 11431 break 11432 } 11433 d := v_1.AuxInt 11434 idx := v_1.Args[0] 11435 mem := v.Args[2] 11436 if !(is32Bit(c + 2*d)) { 11437 break 11438 } 11439 v.reset(OpAMD64MOVWloadidx2) 11440 v.AuxInt = c + 2*d 11441 v.Aux = sym 11442 
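// Editor's note (not generated): idx2 addressing computes ptr + 2*idx + c,
// so pulling d out of the index scales it by the two-byte element width:
// ptr + 2*(idx+d) + c == ptr + 2*idx + (c + 2*d), hence the is32Bit(c+2*d)
// guard above rather than the plain is32Bit(c+d) of the idx1 rules.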
v.AddArg(ptr) 11443 v.AddArg(idx) 11444 v.AddArg(mem) 11445 return true 11446 } 11447 return false 11448 } 11449 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 11450 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 11451 // cond: 11452 // result: (MOVWstore [off] {sym} ptr x mem) 11453 for { 11454 off := v.AuxInt 11455 sym := v.Aux 11456 _ = v.Args[2] 11457 ptr := v.Args[0] 11458 v_1 := v.Args[1] 11459 if v_1.Op != OpAMD64MOVWQSX { 11460 break 11461 } 11462 x := v_1.Args[0] 11463 mem := v.Args[2] 11464 v.reset(OpAMD64MOVWstore) 11465 v.AuxInt = off 11466 v.Aux = sym 11467 v.AddArg(ptr) 11468 v.AddArg(x) 11469 v.AddArg(mem) 11470 return true 11471 } 11472 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 11473 // cond: 11474 // result: (MOVWstore [off] {sym} ptr x mem) 11475 for { 11476 off := v.AuxInt 11477 sym := v.Aux 11478 _ = v.Args[2] 11479 ptr := v.Args[0] 11480 v_1 := v.Args[1] 11481 if v_1.Op != OpAMD64MOVWQZX { 11482 break 11483 } 11484 x := v_1.Args[0] 11485 mem := v.Args[2] 11486 v.reset(OpAMD64MOVWstore) 11487 v.AuxInt = off 11488 v.Aux = sym 11489 v.AddArg(ptr) 11490 v.AddArg(x) 11491 v.AddArg(mem) 11492 return true 11493 } 11494 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 11495 // cond: is32Bit(off1+off2) 11496 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 11497 for { 11498 off1 := v.AuxInt 11499 sym := v.Aux 11500 _ = v.Args[2] 11501 v_0 := v.Args[0] 11502 if v_0.Op != OpAMD64ADDQconst { 11503 break 11504 } 11505 off2 := v_0.AuxInt 11506 ptr := v_0.Args[0] 11507 val := v.Args[1] 11508 mem := v.Args[2] 11509 if !(is32Bit(off1 + off2)) { 11510 break 11511 } 11512 v.reset(OpAMD64MOVWstore) 11513 v.AuxInt = off1 + off2 11514 v.Aux = sym 11515 v.AddArg(ptr) 11516 v.AddArg(val) 11517 v.AddArg(mem) 11518 return true 11519 } 11520 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 11521 // cond: validOff(off) 11522 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 11523 for { 11524 off := v.AuxInt 11525 sym := v.Aux 11526 _ = v.Args[2] 11527 ptr := v.Args[0] 11528 v_1 := v.Args[1] 11529 if v_1.Op != OpAMD64MOVLconst { 11530 break 11531 } 11532 c := v_1.AuxInt 11533 mem := v.Args[2] 11534 if !(validOff(off)) { 11535 break 11536 } 11537 v.reset(OpAMD64MOVWstoreconst) 11538 v.AuxInt = makeValAndOff(int64(int16(c)), off) 11539 v.Aux = sym 11540 v.AddArg(ptr) 11541 v.AddArg(mem) 11542 return true 11543 } 11544 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11545 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11546 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11547 for { 11548 off1 := v.AuxInt 11549 sym1 := v.Aux 11550 _ = v.Args[2] 11551 v_0 := v.Args[0] 11552 if v_0.Op != OpAMD64LEAQ { 11553 break 11554 } 11555 off2 := v_0.AuxInt 11556 sym2 := v_0.Aux 11557 base := v_0.Args[0] 11558 val := v.Args[1] 11559 mem := v.Args[2] 11560 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11561 break 11562 } 11563 v.reset(OpAMD64MOVWstore) 11564 v.AuxInt = off1 + off2 11565 v.Aux = mergeSym(sym1, sym2) 11566 v.AddArg(base) 11567 v.AddArg(val) 11568 v.AddArg(mem) 11569 return true 11570 } 11571 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 11572 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11573 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11574 for { 11575 off1 := v.AuxInt 11576 sym1 := v.Aux 11577 _ = v.Args[2] 11578 v_0 := v.Args[0] 11579 if v_0.Op != OpAMD64LEAQ1 { 11580 break 11581 
} 11582 off2 := v_0.AuxInt 11583 sym2 := v_0.Aux 11584 _ = v_0.Args[1] 11585 ptr := v_0.Args[0] 11586 idx := v_0.Args[1] 11587 val := v.Args[1] 11588 mem := v.Args[2] 11589 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11590 break 11591 } 11592 v.reset(OpAMD64MOVWstoreidx1) 11593 v.AuxInt = off1 + off2 11594 v.Aux = mergeSym(sym1, sym2) 11595 v.AddArg(ptr) 11596 v.AddArg(idx) 11597 v.AddArg(val) 11598 v.AddArg(mem) 11599 return true 11600 } 11601 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 11602 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 11603 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 11604 for { 11605 off1 := v.AuxInt 11606 sym1 := v.Aux 11607 _ = v.Args[2] 11608 v_0 := v.Args[0] 11609 if v_0.Op != OpAMD64LEAQ2 { 11610 break 11611 } 11612 off2 := v_0.AuxInt 11613 sym2 := v_0.Aux 11614 _ = v_0.Args[1] 11615 ptr := v_0.Args[0] 11616 idx := v_0.Args[1] 11617 val := v.Args[1] 11618 mem := v.Args[2] 11619 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 11620 break 11621 } 11622 v.reset(OpAMD64MOVWstoreidx2) 11623 v.AuxInt = off1 + off2 11624 v.Aux = mergeSym(sym1, sym2) 11625 v.AddArg(ptr) 11626 v.AddArg(idx) 11627 v.AddArg(val) 11628 v.AddArg(mem) 11629 return true 11630 } 11631 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 11632 // cond: ptr.Op != OpSB 11633 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 11634 for { 11635 off := v.AuxInt 11636 sym := v.Aux 11637 _ = v.Args[2] 11638 v_0 := v.Args[0] 11639 if v_0.Op != OpAMD64ADDQ { 11640 break 11641 } 11642 _ = v_0.Args[1] 11643 ptr := v_0.Args[0] 11644 idx := v_0.Args[1] 11645 val := v.Args[1] 11646 mem := v.Args[2] 11647 if !(ptr.Op != OpSB) { 11648 break 11649 } 11650 v.reset(OpAMD64MOVWstoreidx1) 11651 v.AuxInt = off 11652 v.Aux = sym 11653 v.AddArg(ptr) 11654 v.AddArg(idx) 11655 v.AddArg(val) 11656 v.AddArg(mem) 11657 return true 11658 } 11659 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 11660 // cond: x.Uses == 1 && clobber(x) 11661 // result: (MOVLstore [i-2] {s} p w mem) 11662 for { 11663 i := v.AuxInt 11664 s := v.Aux 11665 _ = v.Args[2] 11666 p := v.Args[0] 11667 v_1 := v.Args[1] 11668 if v_1.Op != OpAMD64SHRQconst { 11669 break 11670 } 11671 if v_1.AuxInt != 16 { 11672 break 11673 } 11674 w := v_1.Args[0] 11675 x := v.Args[2] 11676 if x.Op != OpAMD64MOVWstore { 11677 break 11678 } 11679 if x.AuxInt != i-2 { 11680 break 11681 } 11682 if x.Aux != s { 11683 break 11684 } 11685 _ = x.Args[2] 11686 if p != x.Args[0] { 11687 break 11688 } 11689 if w != x.Args[1] { 11690 break 11691 } 11692 mem := x.Args[2] 11693 if !(x.Uses == 1 && clobber(x)) { 11694 break 11695 } 11696 v.reset(OpAMD64MOVLstore) 11697 v.AuxInt = i - 2 11698 v.Aux = s 11699 v.AddArg(p) 11700 v.AddArg(w) 11701 v.AddArg(mem) 11702 return true 11703 } 11704 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 11705 // cond: x.Uses == 1 && clobber(x) 11706 // result: (MOVLstore [i-2] {s} p w0 mem) 11707 for { 11708 i := v.AuxInt 11709 s := v.Aux 11710 _ = v.Args[2] 11711 p := v.Args[0] 11712 v_1 := v.Args[1] 11713 if v_1.Op != OpAMD64SHRQconst { 11714 break 11715 } 11716 j := v_1.AuxInt 11717 w := v_1.Args[0] 11718 x := v.Args[2] 11719 if x.Op != OpAMD64MOVWstore { 11720 break 11721 } 11722 if x.AuxInt != i-2 { 11723 break 11724 } 11725 if x.Aux != s { 11726 break 11727 } 11728 _ = x.Args[2] 11729 if p != x.Args[0] { 11730 break 11731 } 11732 w0 := x.Args[1] 11733 if w0.Op != 
OpAMD64SHRQconst { 11734 break 11735 } 11736 if w0.AuxInt != j-16 { 11737 break 11738 } 11739 if w != w0.Args[0] { 11740 break 11741 } 11742 mem := x.Args[2] 11743 if !(x.Uses == 1 && clobber(x)) { 11744 break 11745 } 11746 v.reset(OpAMD64MOVLstore) 11747 v.AuxInt = i - 2 11748 v.Aux = s 11749 v.AddArg(p) 11750 v.AddArg(w0) 11751 v.AddArg(mem) 11752 return true 11753 } 11754 return false 11755 } 11756 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 11757 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 11758 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 11759 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11760 for { 11761 off1 := v.AuxInt 11762 sym1 := v.Aux 11763 _ = v.Args[2] 11764 v_0 := v.Args[0] 11765 if v_0.Op != OpAMD64LEAL { 11766 break 11767 } 11768 off2 := v_0.AuxInt 11769 sym2 := v_0.Aux 11770 base := v_0.Args[0] 11771 val := v.Args[1] 11772 mem := v.Args[2] 11773 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 11774 break 11775 } 11776 v.reset(OpAMD64MOVWstore) 11777 v.AuxInt = off1 + off2 11778 v.Aux = mergeSym(sym1, sym2) 11779 v.AddArg(base) 11780 v.AddArg(val) 11781 v.AddArg(mem) 11782 return true 11783 } 11784 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 11785 // cond: is32Bit(off1+off2) 11786 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 11787 for { 11788 off1 := v.AuxInt 11789 sym := v.Aux 11790 _ = v.Args[2] 11791 v_0 := v.Args[0] 11792 if v_0.Op != OpAMD64ADDLconst { 11793 break 11794 } 11795 off2 := v_0.AuxInt 11796 ptr := v_0.Args[0] 11797 val := v.Args[1] 11798 mem := v.Args[2] 11799 if !(is32Bit(off1 + off2)) { 11800 break 11801 } 11802 v.reset(OpAMD64MOVWstore) 11803 v.AuxInt = off1 + off2 11804 v.Aux = sym 11805 v.AddArg(ptr) 11806 v.AddArg(val) 11807 v.AddArg(mem) 11808 return true 11809 } 11810 return false 11811 } 11812 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { 11813 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 11814 // cond: ValAndOff(sc).canAdd(off) 11815 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 11816 for { 11817 sc := v.AuxInt 11818 s := v.Aux 11819 _ = v.Args[1] 11820 v_0 := v.Args[0] 11821 if v_0.Op != OpAMD64ADDQconst { 11822 break 11823 } 11824 off := v_0.AuxInt 11825 ptr := v_0.Args[0] 11826 mem := v.Args[1] 11827 if !(ValAndOff(sc).canAdd(off)) { 11828 break 11829 } 11830 v.reset(OpAMD64MOVWstoreconst) 11831 v.AuxInt = ValAndOff(sc).add(off) 11832 v.Aux = s 11833 v.AddArg(ptr) 11834 v.AddArg(mem) 11835 return true 11836 } 11837 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 11838 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 11839 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 11840 for { 11841 sc := v.AuxInt 11842 sym1 := v.Aux 11843 _ = v.Args[1] 11844 v_0 := v.Args[0] 11845 if v_0.Op != OpAMD64LEAQ { 11846 break 11847 } 11848 off := v_0.AuxInt 11849 sym2 := v_0.Aux 11850 ptr := v_0.Args[0] 11851 mem := v.Args[1] 11852 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 11853 break 11854 } 11855 v.reset(OpAMD64MOVWstoreconst) 11856 v.AuxInt = ValAndOff(sc).add(off) 11857 v.Aux = mergeSym(sym1, sym2) 11858 v.AddArg(ptr) 11859 v.AddArg(mem) 11860 return true 11861 } 11862 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 11863 // cond: canMergeSym(sym1, sym2) 11864 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 11865 for { 11866 x := v.AuxInt 11867 
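// Editor's note (not generated): for the store-constant ops the AuxInt is a
// ValAndOff, which packs both the 16-bit immediate being stored and the
// displacement into a single int64. ValAndOff(x).add(off) bumps only the
// offset half, leaving the stored value untouched, and canAdd is the
// matching overflow guard on that half.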
sym1 := v.Aux 11868 _ = v.Args[1] 11869 v_0 := v.Args[0] 11870 if v_0.Op != OpAMD64LEAQ1 { 11871 break 11872 } 11873 off := v_0.AuxInt 11874 sym2 := v_0.Aux 11875 _ = v_0.Args[1] 11876 ptr := v_0.Args[0] 11877 idx := v_0.Args[1] 11878 mem := v.Args[1] 11879 if !(canMergeSym(sym1, sym2)) { 11880 break 11881 } 11882 v.reset(OpAMD64MOVWstoreconstidx1) 11883 v.AuxInt = ValAndOff(x).add(off) 11884 v.Aux = mergeSym(sym1, sym2) 11885 v.AddArg(ptr) 11886 v.AddArg(idx) 11887 v.AddArg(mem) 11888 return true 11889 } 11890 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 11891 // cond: canMergeSym(sym1, sym2) 11892 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 11893 for { 11894 x := v.AuxInt 11895 sym1 := v.Aux 11896 _ = v.Args[1] 11897 v_0 := v.Args[0] 11898 if v_0.Op != OpAMD64LEAQ2 { 11899 break 11900 } 11901 off := v_0.AuxInt 11902 sym2 := v_0.Aux 11903 _ = v_0.Args[1] 11904 ptr := v_0.Args[0] 11905 idx := v_0.Args[1] 11906 mem := v.Args[1] 11907 if !(canMergeSym(sym1, sym2)) { 11908 break 11909 } 11910 v.reset(OpAMD64MOVWstoreconstidx2) 11911 v.AuxInt = ValAndOff(x).add(off) 11912 v.Aux = mergeSym(sym1, sym2) 11913 v.AddArg(ptr) 11914 v.AddArg(idx) 11915 v.AddArg(mem) 11916 return true 11917 } 11918 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 11919 // cond: 11920 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 11921 for { 11922 x := v.AuxInt 11923 sym := v.Aux 11924 _ = v.Args[1] 11925 v_0 := v.Args[0] 11926 if v_0.Op != OpAMD64ADDQ { 11927 break 11928 } 11929 _ = v_0.Args[1] 11930 ptr := v_0.Args[0] 11931 idx := v_0.Args[1] 11932 mem := v.Args[1] 11933 v.reset(OpAMD64MOVWstoreconstidx1) 11934 v.AuxInt = x 11935 v.Aux = sym 11936 v.AddArg(ptr) 11937 v.AddArg(idx) 11938 v.AddArg(mem) 11939 return true 11940 } 11941 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 11942 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 11943 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 11944 for { 11945 c := v.AuxInt 11946 s := v.Aux 11947 _ = v.Args[1] 11948 p := v.Args[0] 11949 x := v.Args[1] 11950 if x.Op != OpAMD64MOVWstoreconst { 11951 break 11952 } 11953 a := x.AuxInt 11954 if x.Aux != s { 11955 break 11956 } 11957 _ = x.Args[1] 11958 if p != x.Args[0] { 11959 break 11960 } 11961 mem := x.Args[1] 11962 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 11963 break 11964 } 11965 v.reset(OpAMD64MOVLstoreconst) 11966 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 11967 v.Aux = s 11968 v.AddArg(p) 11969 v.AddArg(mem) 11970 return true 11971 } 11972 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 11973 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 11974 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 11975 for { 11976 sc := v.AuxInt 11977 sym1 := v.Aux 11978 _ = v.Args[1] 11979 v_0 := v.Args[0] 11980 if v_0.Op != OpAMD64LEAL { 11981 break 11982 } 11983 off := v_0.AuxInt 11984 sym2 := v_0.Aux 11985 ptr := v_0.Args[0] 11986 mem := v.Args[1] 11987 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 11988 break 11989 } 11990 v.reset(OpAMD64MOVWstoreconst) 11991 v.AuxInt = ValAndOff(sc).add(off) 11992 v.Aux = mergeSym(sym1, sym2) 11993 v.AddArg(ptr) 11994 v.AddArg(mem) 11995 return true 11996 } 11997 // match: (MOVWstoreconst [sc] {s} (ADDLconst 
[off] ptr) mem) 11998 // cond: ValAndOff(sc).canAdd(off) 11999 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 12000 for { 12001 sc := v.AuxInt 12002 s := v.Aux 12003 _ = v.Args[1] 12004 v_0 := v.Args[0] 12005 if v_0.Op != OpAMD64ADDLconst { 12006 break 12007 } 12008 off := v_0.AuxInt 12009 ptr := v_0.Args[0] 12010 mem := v.Args[1] 12011 if !(ValAndOff(sc).canAdd(off)) { 12012 break 12013 } 12014 v.reset(OpAMD64MOVWstoreconst) 12015 v.AuxInt = ValAndOff(sc).add(off) 12016 v.Aux = s 12017 v.AddArg(ptr) 12018 v.AddArg(mem) 12019 return true 12020 } 12021 return false 12022 } 12023 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { 12024 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 12025 // cond: 12026 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 12027 for { 12028 c := v.AuxInt 12029 sym := v.Aux 12030 _ = v.Args[2] 12031 ptr := v.Args[0] 12032 v_1 := v.Args[1] 12033 if v_1.Op != OpAMD64SHLQconst { 12034 break 12035 } 12036 if v_1.AuxInt != 1 { 12037 break 12038 } 12039 idx := v_1.Args[0] 12040 mem := v.Args[2] 12041 v.reset(OpAMD64MOVWstoreconstidx2) 12042 v.AuxInt = c 12043 v.Aux = sym 12044 v.AddArg(ptr) 12045 v.AddArg(idx) 12046 v.AddArg(mem) 12047 return true 12048 } 12049 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 12050 // cond: ValAndOff(x).canAdd(c) 12051 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 12052 for { 12053 x := v.AuxInt 12054 sym := v.Aux 12055 _ = v.Args[2] 12056 v_0 := v.Args[0] 12057 if v_0.Op != OpAMD64ADDQconst { 12058 break 12059 } 12060 c := v_0.AuxInt 12061 ptr := v_0.Args[0] 12062 idx := v.Args[1] 12063 mem := v.Args[2] 12064 if !(ValAndOff(x).canAdd(c)) { 12065 break 12066 } 12067 v.reset(OpAMD64MOVWstoreconstidx1) 12068 v.AuxInt = ValAndOff(x).add(c) 12069 v.Aux = sym 12070 v.AddArg(ptr) 12071 v.AddArg(idx) 12072 v.AddArg(mem) 12073 return true 12074 } 12075 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 12076 // cond: ValAndOff(x).canAdd(c) 12077 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 12078 for { 12079 x := v.AuxInt 12080 sym := v.Aux 12081 _ = v.Args[2] 12082 ptr := v.Args[0] 12083 v_1 := v.Args[1] 12084 if v_1.Op != OpAMD64ADDQconst { 12085 break 12086 } 12087 c := v_1.AuxInt 12088 idx := v_1.Args[0] 12089 mem := v.Args[2] 12090 if !(ValAndOff(x).canAdd(c)) { 12091 break 12092 } 12093 v.reset(OpAMD64MOVWstoreconstidx1) 12094 v.AuxInt = ValAndOff(x).add(c) 12095 v.Aux = sym 12096 v.AddArg(ptr) 12097 v.AddArg(idx) 12098 v.AddArg(mem) 12099 return true 12100 } 12101 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 12102 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 12103 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 12104 for { 12105 c := v.AuxInt 12106 s := v.Aux 12107 _ = v.Args[2] 12108 p := v.Args[0] 12109 i := v.Args[1] 12110 x := v.Args[2] 12111 if x.Op != OpAMD64MOVWstoreconstidx1 { 12112 break 12113 } 12114 a := x.AuxInt 12115 if x.Aux != s { 12116 break 12117 } 12118 _ = x.Args[2] 12119 if p != x.Args[0] { 12120 break 12121 } 12122 if i != x.Args[1] { 12123 break 12124 } 12125 mem := x.Args[2] 12126 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 12127 break 12128 } 12129 v.reset(OpAMD64MOVLstoreconstidx1) 12130 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, 
ValAndOff(a).Off()) 12131 v.Aux = s 12132 v.AddArg(p) 12133 v.AddArg(i) 12134 v.AddArg(mem) 12135 return true 12136 } 12137 return false 12138 } 12139 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { 12140 b := v.Block 12141 _ = b 12142 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 12143 // cond: ValAndOff(x).canAdd(c) 12144 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 12145 for { 12146 x := v.AuxInt 12147 sym := v.Aux 12148 _ = v.Args[2] 12149 v_0 := v.Args[0] 12150 if v_0.Op != OpAMD64ADDQconst { 12151 break 12152 } 12153 c := v_0.AuxInt 12154 ptr := v_0.Args[0] 12155 idx := v.Args[1] 12156 mem := v.Args[2] 12157 if !(ValAndOff(x).canAdd(c)) { 12158 break 12159 } 12160 v.reset(OpAMD64MOVWstoreconstidx2) 12161 v.AuxInt = ValAndOff(x).add(c) 12162 v.Aux = sym 12163 v.AddArg(ptr) 12164 v.AddArg(idx) 12165 v.AddArg(mem) 12166 return true 12167 } 12168 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 12169 // cond: ValAndOff(x).canAdd(2*c) 12170 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 12171 for { 12172 x := v.AuxInt 12173 sym := v.Aux 12174 _ = v.Args[2] 12175 ptr := v.Args[0] 12176 v_1 := v.Args[1] 12177 if v_1.Op != OpAMD64ADDQconst { 12178 break 12179 } 12180 c := v_1.AuxInt 12181 idx := v_1.Args[0] 12182 mem := v.Args[2] 12183 if !(ValAndOff(x).canAdd(2 * c)) { 12184 break 12185 } 12186 v.reset(OpAMD64MOVWstoreconstidx2) 12187 v.AuxInt = ValAndOff(x).add(2 * c) 12188 v.Aux = sym 12189 v.AddArg(ptr) 12190 v.AddArg(idx) 12191 v.AddArg(mem) 12192 return true 12193 } 12194 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 12195 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 12196 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 12197 for { 12198 c := v.AuxInt 12199 s := v.Aux 12200 _ = v.Args[2] 12201 p := v.Args[0] 12202 i := v.Args[1] 12203 x := v.Args[2] 12204 if x.Op != OpAMD64MOVWstoreconstidx2 { 12205 break 12206 } 12207 a := x.AuxInt 12208 if x.Aux != s { 12209 break 12210 } 12211 _ = x.Args[2] 12212 if p != x.Args[0] { 12213 break 12214 } 12215 if i != x.Args[1] { 12216 break 12217 } 12218 mem := x.Args[2] 12219 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 12220 break 12221 } 12222 v.reset(OpAMD64MOVLstoreconstidx1) 12223 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 12224 v.Aux = s 12225 v.AddArg(p) 12226 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 12227 v0.AuxInt = 1 12228 v0.AddArg(i) 12229 v.AddArg(v0) 12230 v.AddArg(mem) 12231 return true 12232 } 12233 return false 12234 } 12235 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { 12236 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 12237 // cond: 12238 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 12239 for { 12240 c := v.AuxInt 12241 sym := v.Aux 12242 _ = v.Args[3] 12243 ptr := v.Args[0] 12244 v_1 := v.Args[1] 12245 if v_1.Op != OpAMD64SHLQconst { 12246 break 12247 } 12248 if v_1.AuxInt != 1 { 12249 break 12250 } 12251 idx := v_1.Args[0] 12252 val := v.Args[2] 12253 mem := v.Args[3] 12254 v.reset(OpAMD64MOVWstoreidx2) 12255 v.AuxInt = c 12256 v.Aux = sym 12257 v.AddArg(ptr) 12258 v.AddArg(idx) 12259 v.AddArg(val) 12260 v.AddArg(mem) 12261 return true 12262 } 12263 // match: (MOVWstoreidx1 [c] {sym} 
(ADDQconst [d] ptr) idx val mem) 12264 // cond: is32Bit(c+d) 12265 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 12266 for { 12267 c := v.AuxInt 12268 sym := v.Aux 12269 _ = v.Args[3] 12270 v_0 := v.Args[0] 12271 if v_0.Op != OpAMD64ADDQconst { 12272 break 12273 } 12274 d := v_0.AuxInt 12275 ptr := v_0.Args[0] 12276 idx := v.Args[1] 12277 val := v.Args[2] 12278 mem := v.Args[3] 12279 if !(is32Bit(c + d)) { 12280 break 12281 } 12282 v.reset(OpAMD64MOVWstoreidx1) 12283 v.AuxInt = c + d 12284 v.Aux = sym 12285 v.AddArg(ptr) 12286 v.AddArg(idx) 12287 v.AddArg(val) 12288 v.AddArg(mem) 12289 return true 12290 } 12291 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 12292 // cond: is32Bit(c+d) 12293 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 12294 for { 12295 c := v.AuxInt 12296 sym := v.Aux 12297 _ = v.Args[3] 12298 ptr := v.Args[0] 12299 v_1 := v.Args[1] 12300 if v_1.Op != OpAMD64ADDQconst { 12301 break 12302 } 12303 d := v_1.AuxInt 12304 idx := v_1.Args[0] 12305 val := v.Args[2] 12306 mem := v.Args[3] 12307 if !(is32Bit(c + d)) { 12308 break 12309 } 12310 v.reset(OpAMD64MOVWstoreidx1) 12311 v.AuxInt = c + d 12312 v.Aux = sym 12313 v.AddArg(ptr) 12314 v.AddArg(idx) 12315 v.AddArg(val) 12316 v.AddArg(mem) 12317 return true 12318 } 12319 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 12320 // cond: x.Uses == 1 && clobber(x) 12321 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 12322 for { 12323 i := v.AuxInt 12324 s := v.Aux 12325 _ = v.Args[3] 12326 p := v.Args[0] 12327 idx := v.Args[1] 12328 v_2 := v.Args[2] 12329 if v_2.Op != OpAMD64SHRQconst { 12330 break 12331 } 12332 if v_2.AuxInt != 16 { 12333 break 12334 } 12335 w := v_2.Args[0] 12336 x := v.Args[3] 12337 if x.Op != OpAMD64MOVWstoreidx1 { 12338 break 12339 } 12340 if x.AuxInt != i-2 { 12341 break 12342 } 12343 if x.Aux != s { 12344 break 12345 } 12346 _ = x.Args[3] 12347 if p != x.Args[0] { 12348 break 12349 } 12350 if idx != x.Args[1] { 12351 break 12352 } 12353 if w != x.Args[2] { 12354 break 12355 } 12356 mem := x.Args[3] 12357 if !(x.Uses == 1 && clobber(x)) { 12358 break 12359 } 12360 v.reset(OpAMD64MOVLstoreidx1) 12361 v.AuxInt = i - 2 12362 v.Aux = s 12363 v.AddArg(p) 12364 v.AddArg(idx) 12365 v.AddArg(w) 12366 v.AddArg(mem) 12367 return true 12368 } 12369 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 12370 // cond: x.Uses == 1 && clobber(x) 12371 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 12372 for { 12373 i := v.AuxInt 12374 s := v.Aux 12375 _ = v.Args[3] 12376 p := v.Args[0] 12377 idx := v.Args[1] 12378 v_2 := v.Args[2] 12379 if v_2.Op != OpAMD64SHRQconst { 12380 break 12381 } 12382 j := v_2.AuxInt 12383 w := v_2.Args[0] 12384 x := v.Args[3] 12385 if x.Op != OpAMD64MOVWstoreidx1 { 12386 break 12387 } 12388 if x.AuxInt != i-2 { 12389 break 12390 } 12391 if x.Aux != s { 12392 break 12393 } 12394 _ = x.Args[3] 12395 if p != x.Args[0] { 12396 break 12397 } 12398 if idx != x.Args[1] { 12399 break 12400 } 12401 w0 := x.Args[2] 12402 if w0.Op != OpAMD64SHRQconst { 12403 break 12404 } 12405 if w0.AuxInt != j-16 { 12406 break 12407 } 12408 if w != w0.Args[0] { 12409 break 12410 } 12411 mem := x.Args[3] 12412 if !(x.Uses == 1 && clobber(x)) { 12413 break 12414 } 12415 v.reset(OpAMD64MOVLstoreidx1) 12416 v.AuxInt = i - 2 12417 v.Aux = s 12418 v.AddArg(p) 12419 v.AddArg(idx) 12420 v.AddArg(w0) 12421 v.AddArg(mem) 12422 return true 12423 } 12424 return false 12425 } 
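// rescaledIdx2Addr is an editorial sketch, not part of the generated rules;
// the helper name is ours, purely illustrative. When the MOVWstoreidx2
// rules in the next function (and the MOVWstoreconstidx2 rules above) merge
// two adjacent 16-bit stores into one 32-bit store, there is no 2-scaled
// MOVL indexed op to target, so they fall back to MOVLstoreidx1 and emit
// (SHLQconst [1] idx) as the new index. The address is preserved because
//
//	ptr + 2*idx + off == ptr + 1*(idx<<1) + off
//
// as this helper spells out for concrete operand values.
func rescaledIdx2Addr(ptr, idx, off int64) (scale2, scale1 int64) {
	scale2 = ptr + 2*idx + off      // original two-byte scaled address
	scale1 = ptr + (idx << 1) + off // same byte address at scale 1
	return
}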
12426 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { 12427 b := v.Block 12428 _ = b 12429 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 12430 // cond: is32Bit(c+d) 12431 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 12432 for { 12433 c := v.AuxInt 12434 sym := v.Aux 12435 _ = v.Args[3] 12436 v_0 := v.Args[0] 12437 if v_0.Op != OpAMD64ADDQconst { 12438 break 12439 } 12440 d := v_0.AuxInt 12441 ptr := v_0.Args[0] 12442 idx := v.Args[1] 12443 val := v.Args[2] 12444 mem := v.Args[3] 12445 if !(is32Bit(c + d)) { 12446 break 12447 } 12448 v.reset(OpAMD64MOVWstoreidx2) 12449 v.AuxInt = c + d 12450 v.Aux = sym 12451 v.AddArg(ptr) 12452 v.AddArg(idx) 12453 v.AddArg(val) 12454 v.AddArg(mem) 12455 return true 12456 } 12457 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 12458 // cond: is32Bit(c+2*d) 12459 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 12460 for { 12461 c := v.AuxInt 12462 sym := v.Aux 12463 _ = v.Args[3] 12464 ptr := v.Args[0] 12465 v_1 := v.Args[1] 12466 if v_1.Op != OpAMD64ADDQconst { 12467 break 12468 } 12469 d := v_1.AuxInt 12470 idx := v_1.Args[0] 12471 val := v.Args[2] 12472 mem := v.Args[3] 12473 if !(is32Bit(c + 2*d)) { 12474 break 12475 } 12476 v.reset(OpAMD64MOVWstoreidx2) 12477 v.AuxInt = c + 2*d 12478 v.Aux = sym 12479 v.AddArg(ptr) 12480 v.AddArg(idx) 12481 v.AddArg(val) 12482 v.AddArg(mem) 12483 return true 12484 } 12485 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 12486 // cond: x.Uses == 1 && clobber(x) 12487 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 12488 for { 12489 i := v.AuxInt 12490 s := v.Aux 12491 _ = v.Args[3] 12492 p := v.Args[0] 12493 idx := v.Args[1] 12494 v_2 := v.Args[2] 12495 if v_2.Op != OpAMD64SHRQconst { 12496 break 12497 } 12498 if v_2.AuxInt != 16 { 12499 break 12500 } 12501 w := v_2.Args[0] 12502 x := v.Args[3] 12503 if x.Op != OpAMD64MOVWstoreidx2 { 12504 break 12505 } 12506 if x.AuxInt != i-2 { 12507 break 12508 } 12509 if x.Aux != s { 12510 break 12511 } 12512 _ = x.Args[3] 12513 if p != x.Args[0] { 12514 break 12515 } 12516 if idx != x.Args[1] { 12517 break 12518 } 12519 if w != x.Args[2] { 12520 break 12521 } 12522 mem := x.Args[3] 12523 if !(x.Uses == 1 && clobber(x)) { 12524 break 12525 } 12526 v.reset(OpAMD64MOVLstoreidx1) 12527 v.AuxInt = i - 2 12528 v.Aux = s 12529 v.AddArg(p) 12530 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 12531 v0.AuxInt = 1 12532 v0.AddArg(idx) 12533 v.AddArg(v0) 12534 v.AddArg(w) 12535 v.AddArg(mem) 12536 return true 12537 } 12538 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 12539 // cond: x.Uses == 1 && clobber(x) 12540 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 12541 for { 12542 i := v.AuxInt 12543 s := v.Aux 12544 _ = v.Args[3] 12545 p := v.Args[0] 12546 idx := v.Args[1] 12547 v_2 := v.Args[2] 12548 if v_2.Op != OpAMD64SHRQconst { 12549 break 12550 } 12551 j := v_2.AuxInt 12552 w := v_2.Args[0] 12553 x := v.Args[3] 12554 if x.Op != OpAMD64MOVWstoreidx2 { 12555 break 12556 } 12557 if x.AuxInt != i-2 { 12558 break 12559 } 12560 if x.Aux != s { 12561 break 12562 } 12563 _ = x.Args[3] 12564 if p != x.Args[0] { 12565 break 12566 } 12567 if idx != x.Args[1] { 12568 break 12569 } 12570 w0 := x.Args[2] 12571 if w0.Op != OpAMD64SHRQconst { 12572 break 12573 } 12574 if w0.AuxInt != j-16 { 12575 break 12576 } 12577 if w != w0.Args[0] { 12578 
break 12579 } 12580 mem := x.Args[3] 12581 if !(x.Uses == 1 && clobber(x)) { 12582 break 12583 } 12584 v.reset(OpAMD64MOVLstoreidx1) 12585 v.AuxInt = i - 2 12586 v.Aux = s 12587 v.AddArg(p) 12588 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 12589 v0.AuxInt = 1 12590 v0.AddArg(idx) 12591 v.AddArg(v0) 12592 v.AddArg(w0) 12593 v.AddArg(mem) 12594 return true 12595 } 12596 return false 12597 } 12598 func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { 12599 // match: (MULL x (MOVLconst [c])) 12600 // cond: 12601 // result: (MULLconst [c] x) 12602 for { 12603 _ = v.Args[1] 12604 x := v.Args[0] 12605 v_1 := v.Args[1] 12606 if v_1.Op != OpAMD64MOVLconst { 12607 break 12608 } 12609 c := v_1.AuxInt 12610 v.reset(OpAMD64MULLconst) 12611 v.AuxInt = c 12612 v.AddArg(x) 12613 return true 12614 } 12615 // match: (MULL (MOVLconst [c]) x) 12616 // cond: 12617 // result: (MULLconst [c] x) 12618 for { 12619 _ = v.Args[1] 12620 v_0 := v.Args[0] 12621 if v_0.Op != OpAMD64MOVLconst { 12622 break 12623 } 12624 c := v_0.AuxInt 12625 x := v.Args[1] 12626 v.reset(OpAMD64MULLconst) 12627 v.AuxInt = c 12628 v.AddArg(x) 12629 return true 12630 } 12631 return false 12632 } 12633 func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { 12634 // match: (MULLconst [c] (MULLconst [d] x)) 12635 // cond: 12636 // result: (MULLconst [int64(int32(c * d))] x) 12637 for { 12638 c := v.AuxInt 12639 v_0 := v.Args[0] 12640 if v_0.Op != OpAMD64MULLconst { 12641 break 12642 } 12643 d := v_0.AuxInt 12644 x := v_0.Args[0] 12645 v.reset(OpAMD64MULLconst) 12646 v.AuxInt = int64(int32(c * d)) 12647 v.AddArg(x) 12648 return true 12649 } 12650 // match: (MULLconst [c] (MOVLconst [d])) 12651 // cond: 12652 // result: (MOVLconst [int64(int32(c*d))]) 12653 for { 12654 c := v.AuxInt 12655 v_0 := v.Args[0] 12656 if v_0.Op != OpAMD64MOVLconst { 12657 break 12658 } 12659 d := v_0.AuxInt 12660 v.reset(OpAMD64MOVLconst) 12661 v.AuxInt = int64(int32(c * d)) 12662 return true 12663 } 12664 return false 12665 } 12666 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { 12667 // match: (MULQ x (MOVQconst [c])) 12668 // cond: is32Bit(c) 12669 // result: (MULQconst [c] x) 12670 for { 12671 _ = v.Args[1] 12672 x := v.Args[0] 12673 v_1 := v.Args[1] 12674 if v_1.Op != OpAMD64MOVQconst { 12675 break 12676 } 12677 c := v_1.AuxInt 12678 if !(is32Bit(c)) { 12679 break 12680 } 12681 v.reset(OpAMD64MULQconst) 12682 v.AuxInt = c 12683 v.AddArg(x) 12684 return true 12685 } 12686 // match: (MULQ (MOVQconst [c]) x) 12687 // cond: is32Bit(c) 12688 // result: (MULQconst [c] x) 12689 for { 12690 _ = v.Args[1] 12691 v_0 := v.Args[0] 12692 if v_0.Op != OpAMD64MOVQconst { 12693 break 12694 } 12695 c := v_0.AuxInt 12696 x := v.Args[1] 12697 if !(is32Bit(c)) { 12698 break 12699 } 12700 v.reset(OpAMD64MULQconst) 12701 v.AuxInt = c 12702 v.AddArg(x) 12703 return true 12704 } 12705 return false 12706 } 12707 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { 12708 b := v.Block 12709 _ = b 12710 // match: (MULQconst [c] (MULQconst [d] x)) 12711 // cond: is32Bit(c*d) 12712 // result: (MULQconst [c * d] x) 12713 for { 12714 c := v.AuxInt 12715 v_0 := v.Args[0] 12716 if v_0.Op != OpAMD64MULQconst { 12717 break 12718 } 12719 d := v_0.AuxInt 12720 x := v_0.Args[0] 12721 if !(is32Bit(c * d)) { 12722 break 12723 } 12724 v.reset(OpAMD64MULQconst) 12725 v.AuxInt = c * d 12726 v.AddArg(x) 12727 return true 12728 } 12729 // match: (MULQconst [-1] x) 12730 // cond: 12731 // result: (NEGQ x) 12732 for { 12733 if v.AuxInt != -1 { 12734 break 12735 } 12736 x := v.Args[0] 
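// Multiplying by -1 is plain negation, so the multiply is replaced in
// place by NEGQ; the small odd constants in the rules that follow are
// instead strength-reduced to LEAQ2/LEAQ4/LEAQ8 address arithmetic.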
12737 v.reset(OpAMD64NEGQ) 12738 v.AddArg(x) 12739 return true 12740 } 12741 // match: (MULQconst [0] _) 12742 // cond: 12743 // result: (MOVQconst [0]) 12744 for { 12745 if v.AuxInt != 0 { 12746 break 12747 } 12748 v.reset(OpAMD64MOVQconst) 12749 v.AuxInt = 0 12750 return true 12751 } 12752 // match: (MULQconst [1] x) 12753 // cond: 12754 // result: x 12755 for { 12756 if v.AuxInt != 1 { 12757 break 12758 } 12759 x := v.Args[0] 12760 v.reset(OpCopy) 12761 v.Type = x.Type 12762 v.AddArg(x) 12763 return true 12764 } 12765 // match: (MULQconst [3] x) 12766 // cond: 12767 // result: (LEAQ2 x x) 12768 for { 12769 if v.AuxInt != 3 { 12770 break 12771 } 12772 x := v.Args[0] 12773 v.reset(OpAMD64LEAQ2) 12774 v.AddArg(x) 12775 v.AddArg(x) 12776 return true 12777 } 12778 // match: (MULQconst [5] x) 12779 // cond: 12780 // result: (LEAQ4 x x) 12781 for { 12782 if v.AuxInt != 5 { 12783 break 12784 } 12785 x := v.Args[0] 12786 v.reset(OpAMD64LEAQ4) 12787 v.AddArg(x) 12788 v.AddArg(x) 12789 return true 12790 } 12791 // match: (MULQconst [7] x) 12792 // cond: 12793 // result: (LEAQ8 (NEGQ <v.Type> x) x) 12794 for { 12795 if v.AuxInt != 7 { 12796 break 12797 } 12798 x := v.Args[0] 12799 v.reset(OpAMD64LEAQ8) 12800 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) 12801 v0.AddArg(x) 12802 v.AddArg(v0) 12803 v.AddArg(x) 12804 return true 12805 } 12806 // match: (MULQconst [9] x) 12807 // cond: 12808 // result: (LEAQ8 x x) 12809 for { 12810 if v.AuxInt != 9 { 12811 break 12812 } 12813 x := v.Args[0] 12814 v.reset(OpAMD64LEAQ8) 12815 v.AddArg(x) 12816 v.AddArg(x) 12817 return true 12818 } 12819 // match: (MULQconst [11] x) 12820 // cond: 12821 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 12822 for { 12823 if v.AuxInt != 11 { 12824 break 12825 } 12826 x := v.Args[0] 12827 v.reset(OpAMD64LEAQ2) 12828 v.AddArg(x) 12829 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 12830 v0.AddArg(x) 12831 v0.AddArg(x) 12832 v.AddArg(v0) 12833 return true 12834 } 12835 // match: (MULQconst [13] x) 12836 // cond: 12837 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 12838 for { 12839 if v.AuxInt != 13 { 12840 break 12841 } 12842 x := v.Args[0] 12843 v.reset(OpAMD64LEAQ4) 12844 v.AddArg(x) 12845 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 12846 v0.AddArg(x) 12847 v0.AddArg(x) 12848 v.AddArg(v0) 12849 return true 12850 } 12851 return false 12852 } 12853 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { 12854 b := v.Block 12855 _ = b 12856 // match: (MULQconst [21] x) 12857 // cond: 12858 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 12859 for { 12860 if v.AuxInt != 21 { 12861 break 12862 } 12863 x := v.Args[0] 12864 v.reset(OpAMD64LEAQ4) 12865 v.AddArg(x) 12866 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 12867 v0.AddArg(x) 12868 v0.AddArg(x) 12869 v.AddArg(v0) 12870 return true 12871 } 12872 // match: (MULQconst [25] x) 12873 // cond: 12874 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 12875 for { 12876 if v.AuxInt != 25 { 12877 break 12878 } 12879 x := v.Args[0] 12880 v.reset(OpAMD64LEAQ8) 12881 v.AddArg(x) 12882 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 12883 v0.AddArg(x) 12884 v0.AddArg(x) 12885 v.AddArg(v0) 12886 return true 12887 } 12888 // match: (MULQconst [37] x) 12889 // cond: 12890 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 12891 for { 12892 if v.AuxInt != 37 { 12893 break 12894 } 12895 x := v.Args[0] 12896 v.reset(OpAMD64LEAQ4) 12897 v.AddArg(x) 12898 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 12899 v0.AddArg(x) 12900 v0.AddArg(x) 12901 v.AddArg(v0) 12902 return true 12903 } 12904 // match: (MULQconst [41] x) 12905 
// cond: 12906 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 12907 for { 12908 if v.AuxInt != 41 { 12909 break 12910 } 12911 x := v.Args[0] 12912 v.reset(OpAMD64LEAQ8) 12913 v.AddArg(x) 12914 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 12915 v0.AddArg(x) 12916 v0.AddArg(x) 12917 v.AddArg(v0) 12918 return true 12919 } 12920 // match: (MULQconst [73] x) 12921 // cond: 12922 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 12923 for { 12924 if v.AuxInt != 73 { 12925 break 12926 } 12927 x := v.Args[0] 12928 v.reset(OpAMD64LEAQ8) 12929 v.AddArg(x) 12930 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 12931 v0.AddArg(x) 12932 v0.AddArg(x) 12933 v.AddArg(v0) 12934 return true 12935 } 12936 // match: (MULQconst [c] x) 12937 // cond: isPowerOfTwo(c) 12938 // result: (SHLQconst [log2(c)] x) 12939 for { 12940 c := v.AuxInt 12941 x := v.Args[0] 12942 if !(isPowerOfTwo(c)) { 12943 break 12944 } 12945 v.reset(OpAMD64SHLQconst) 12946 v.AuxInt = log2(c) 12947 v.AddArg(x) 12948 return true 12949 } 12950 // match: (MULQconst [c] x) 12951 // cond: isPowerOfTwo(c+1) && c >= 15 12952 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 12953 for { 12954 c := v.AuxInt 12955 x := v.Args[0] 12956 if !(isPowerOfTwo(c+1) && c >= 15) { 12957 break 12958 } 12959 v.reset(OpAMD64SUBQ) 12960 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 12961 v0.AuxInt = log2(c + 1) 12962 v0.AddArg(x) 12963 v.AddArg(v0) 12964 v.AddArg(x) 12965 return true 12966 } 12967 // match: (MULQconst [c] x) 12968 // cond: isPowerOfTwo(c-1) && c >= 17 12969 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 12970 for { 12971 c := v.AuxInt 12972 x := v.Args[0] 12973 if !(isPowerOfTwo(c-1) && c >= 17) { 12974 break 12975 } 12976 v.reset(OpAMD64LEAQ1) 12977 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 12978 v0.AuxInt = log2(c - 1) 12979 v0.AddArg(x) 12980 v.AddArg(v0) 12981 v.AddArg(x) 12982 return true 12983 } 12984 // match: (MULQconst [c] x) 12985 // cond: isPowerOfTwo(c-2) && c >= 34 12986 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 12987 for { 12988 c := v.AuxInt 12989 x := v.Args[0] 12990 if !(isPowerOfTwo(c-2) && c >= 34) { 12991 break 12992 } 12993 v.reset(OpAMD64LEAQ2) 12994 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 12995 v0.AuxInt = log2(c - 2) 12996 v0.AddArg(x) 12997 v.AddArg(v0) 12998 v.AddArg(x) 12999 return true 13000 } 13001 // match: (MULQconst [c] x) 13002 // cond: isPowerOfTwo(c-4) && c >= 68 13003 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 13004 for { 13005 c := v.AuxInt 13006 x := v.Args[0] 13007 if !(isPowerOfTwo(c-4) && c >= 68) { 13008 break 13009 } 13010 v.reset(OpAMD64LEAQ4) 13011 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 13012 v0.AuxInt = log2(c - 4) 13013 v0.AddArg(x) 13014 v.AddArg(v0) 13015 v.AddArg(x) 13016 return true 13017 } 13018 return false 13019 } 13020 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { 13021 b := v.Block 13022 _ = b 13023 // match: (MULQconst [c] x) 13024 // cond: isPowerOfTwo(c-8) && c >= 136 13025 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 13026 for { 13027 c := v.AuxInt 13028 x := v.Args[0] 13029 if !(isPowerOfTwo(c-8) && c >= 136) { 13030 break 13031 } 13032 v.reset(OpAMD64LEAQ8) 13033 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 13034 v0.AuxInt = log2(c - 8) 13035 v0.AddArg(x) 13036 v.AddArg(v0) 13037 v.AddArg(x) 13038 return true 13039 } 13040 // match: (MULQconst [c] x) 13041 // cond: c%3 == 0 && isPowerOfTwo(c/3) 13042 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 13043 for { 13044 c := v.AuxInt 13045 
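// This rule and the two below it factor c as a small constant (3, 5, or 9,
// the multipliers reachable with a single LEA) times a power of two, so a
// multiply like 24*x (24 = 3*8) lowers to (LEAQ2 x x) shifted left by 3:
// LEAQ2 x x computes x+2*x = 3*x, and SHLQconst [log2(8)] multiplies by 8.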
x := v.Args[0] 13046 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 13047 break 13048 } 13049 v.reset(OpAMD64SHLQconst) 13050 v.AuxInt = log2(c / 3) 13051 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 13052 v0.AddArg(x) 13053 v0.AddArg(x) 13054 v.AddArg(v0) 13055 return true 13056 } 13057 // match: (MULQconst [c] x) 13058 // cond: c%5 == 0 && isPowerOfTwo(c/5) 13059 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 13060 for { 13061 c := v.AuxInt 13062 x := v.Args[0] 13063 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 13064 break 13065 } 13066 v.reset(OpAMD64SHLQconst) 13067 v.AuxInt = log2(c / 5) 13068 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 13069 v0.AddArg(x) 13070 v0.AddArg(x) 13071 v.AddArg(v0) 13072 return true 13073 } 13074 // match: (MULQconst [c] x) 13075 // cond: c%9 == 0 && isPowerOfTwo(c/9) 13076 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 13077 for { 13078 c := v.AuxInt 13079 x := v.Args[0] 13080 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 13081 break 13082 } 13083 v.reset(OpAMD64SHLQconst) 13084 v.AuxInt = log2(c / 9) 13085 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 13086 v0.AddArg(x) 13087 v0.AddArg(x) 13088 v.AddArg(v0) 13089 return true 13090 } 13091 // match: (MULQconst [c] (MOVQconst [d])) 13092 // cond: 13093 // result: (MOVQconst [c*d]) 13094 for { 13095 c := v.AuxInt 13096 v_0 := v.Args[0] 13097 if v_0.Op != OpAMD64MOVQconst { 13098 break 13099 } 13100 d := v_0.AuxInt 13101 v.reset(OpAMD64MOVQconst) 13102 v.AuxInt = c * d 13103 return true 13104 } 13105 return false 13106 } 13107 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 13108 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) 13109 // cond: canMergeLoad(v, l, x) && clobber(l) 13110 // result: (MULSDmem x [off] {sym} ptr mem) 13111 for { 13112 _ = v.Args[1] 13113 x := v.Args[0] 13114 l := v.Args[1] 13115 if l.Op != OpAMD64MOVSDload { 13116 break 13117 } 13118 off := l.AuxInt 13119 sym := l.Aux 13120 _ = l.Args[1] 13121 ptr := l.Args[0] 13122 mem := l.Args[1] 13123 if !(canMergeLoad(v, l, x) && clobber(l)) { 13124 break 13125 } 13126 v.reset(OpAMD64MULSDmem) 13127 v.AuxInt = off 13128 v.Aux = sym 13129 v.AddArg(x) 13130 v.AddArg(ptr) 13131 v.AddArg(mem) 13132 return true 13133 } 13134 // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) 13135 // cond: canMergeLoad(v, l, x) && clobber(l) 13136 // result: (MULSDmem x [off] {sym} ptr mem) 13137 for { 13138 _ = v.Args[1] 13139 l := v.Args[0] 13140 if l.Op != OpAMD64MOVSDload { 13141 break 13142 } 13143 off := l.AuxInt 13144 sym := l.Aux 13145 _ = l.Args[1] 13146 ptr := l.Args[0] 13147 mem := l.Args[1] 13148 x := v.Args[1] 13149 if !(canMergeLoad(v, l, x) && clobber(l)) { 13150 break 13151 } 13152 v.reset(OpAMD64MULSDmem) 13153 v.AuxInt = off 13154 v.Aux = sym 13155 v.AddArg(x) 13156 v.AddArg(ptr) 13157 v.AddArg(mem) 13158 return true 13159 } 13160 return false 13161 } 13162 func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { 13163 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) 13164 // cond: canMergeLoad(v, l, x) && clobber(l) 13165 // result: (MULSSmem x [off] {sym} ptr mem) 13166 for { 13167 _ = v.Args[1] 13168 x := v.Args[0] 13169 l := v.Args[1] 13170 if l.Op != OpAMD64MOVSSload { 13171 break 13172 } 13173 off := l.AuxInt 13174 sym := l.Aux 13175 _ = l.Args[1] 13176 ptr := l.Args[0] 13177 mem := l.Args[1] 13178 if !(canMergeLoad(v, l, x) && clobber(l)) { 13179 break 13180 } 13181 v.reset(OpAMD64MULSSmem) 13182 v.AuxInt = off 13183 v.Aux = sym 13184 v.AddArg(x) 13185 v.AddArg(ptr) 13186 v.AddArg(mem) 13187 return true 13188 } 13189 // 
match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) 13190 // cond: canMergeLoad(v, l, x) && clobber(l) 13191 // result: (MULSSmem x [off] {sym} ptr mem) 13192 for { 13193 _ = v.Args[1] 13194 l := v.Args[0] 13195 if l.Op != OpAMD64MOVSSload { 13196 break 13197 } 13198 off := l.AuxInt 13199 sym := l.Aux 13200 _ = l.Args[1] 13201 ptr := l.Args[0] 13202 mem := l.Args[1] 13203 x := v.Args[1] 13204 if !(canMergeLoad(v, l, x) && clobber(l)) { 13205 break 13206 } 13207 v.reset(OpAMD64MULSSmem) 13208 v.AuxInt = off 13209 v.Aux = sym 13210 v.AddArg(x) 13211 v.AddArg(ptr) 13212 v.AddArg(mem) 13213 return true 13214 } 13215 return false 13216 } 13217 func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { 13218 // match: (NEGL (MOVLconst [c])) 13219 // cond: 13220 // result: (MOVLconst [int64(int32(-c))]) 13221 for { 13222 v_0 := v.Args[0] 13223 if v_0.Op != OpAMD64MOVLconst { 13224 break 13225 } 13226 c := v_0.AuxInt 13227 v.reset(OpAMD64MOVLconst) 13228 v.AuxInt = int64(int32(-c)) 13229 return true 13230 } 13231 return false 13232 } 13233 func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { 13234 // match: (NEGQ (MOVQconst [c])) 13235 // cond: 13236 // result: (MOVQconst [-c]) 13237 for { 13238 v_0 := v.Args[0] 13239 if v_0.Op != OpAMD64MOVQconst { 13240 break 13241 } 13242 c := v_0.AuxInt 13243 v.reset(OpAMD64MOVQconst) 13244 v.AuxInt = -c 13245 return true 13246 } 13247 // match: (NEGQ (ADDQconst [c] (NEGQ x))) 13248 // cond: c != -(1<<31) 13249 // result: (ADDQconst [-c] x) 13250 for { 13251 v_0 := v.Args[0] 13252 if v_0.Op != OpAMD64ADDQconst { 13253 break 13254 } 13255 c := v_0.AuxInt 13256 v_0_0 := v_0.Args[0] 13257 if v_0_0.Op != OpAMD64NEGQ { 13258 break 13259 } 13260 x := v_0_0.Args[0] 13261 if !(c != -(1 << 31)) { 13262 break 13263 } 13264 v.reset(OpAMD64ADDQconst) 13265 v.AuxInt = -c 13266 v.AddArg(x) 13267 return true 13268 } 13269 return false 13270 } 13271 func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool { 13272 // match: (NOTL (MOVLconst [c])) 13273 // cond: 13274 // result: (MOVLconst [^c]) 13275 for { 13276 v_0 := v.Args[0] 13277 if v_0.Op != OpAMD64MOVLconst { 13278 break 13279 } 13280 c := v_0.AuxInt 13281 v.reset(OpAMD64MOVLconst) 13282 v.AuxInt = ^c 13283 return true 13284 } 13285 return false 13286 } 13287 func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool { 13288 // match: (NOTQ (MOVQconst [c])) 13289 // cond: 13290 // result: (MOVQconst [^c]) 13291 for { 13292 v_0 := v.Args[0] 13293 if v_0.Op != OpAMD64MOVQconst { 13294 break 13295 } 13296 c := v_0.AuxInt 13297 v.reset(OpAMD64MOVQconst) 13298 v.AuxInt = ^c 13299 return true 13300 } 13301 return false 13302 } 13303 func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool { 13304 // match: (ORL x (MOVLconst [c])) 13305 // cond: 13306 // result: (ORLconst [c] x) 13307 for { 13308 _ = v.Args[1] 13309 x := v.Args[0] 13310 v_1 := v.Args[1] 13311 if v_1.Op != OpAMD64MOVLconst { 13312 break 13313 } 13314 c := v_1.AuxInt 13315 v.reset(OpAMD64ORLconst) 13316 v.AuxInt = c 13317 v.AddArg(x) 13318 return true 13319 } 13320 // match: (ORL (MOVLconst [c]) x) 13321 // cond: 13322 // result: (ORLconst [c] x) 13323 for { 13324 _ = v.Args[1] 13325 v_0 := v.Args[0] 13326 if v_0.Op != OpAMD64MOVLconst { 13327 break 13328 } 13329 c := v_0.AuxInt 13330 x := v.Args[1] 13331 v.reset(OpAMD64ORLconst) 13332 v.AuxInt = c 13333 v.AddArg(x) 13334 return true 13335 } 13336 // match: (ORL (SHLLconst x [c]) (SHRLconst x [d])) 13337 // cond: d==32-c 13338 // result: (ROLLconst x [c]) 13339 for { 13340 _ = v.Args[1] 13341 v_0 := v.Args[0] 13342 if v_0.Op != 
OpAMD64SHLLconst { 13343 break 13344 } 13345 c := v_0.AuxInt 13346 x := v_0.Args[0] 13347 v_1 := v.Args[1] 13348 if v_1.Op != OpAMD64SHRLconst { 13349 break 13350 } 13351 d := v_1.AuxInt 13352 if x != v_1.Args[0] { 13353 break 13354 } 13355 if !(d == 32-c) { 13356 break 13357 } 13358 v.reset(OpAMD64ROLLconst) 13359 v.AuxInt = c 13360 v.AddArg(x) 13361 return true 13362 } 13363 // match: (ORL (SHRLconst x [d]) (SHLLconst x [c])) 13364 // cond: d==32-c 13365 // result: (ROLLconst x [c]) 13366 for { 13367 _ = v.Args[1] 13368 v_0 := v.Args[0] 13369 if v_0.Op != OpAMD64SHRLconst { 13370 break 13371 } 13372 d := v_0.AuxInt 13373 x := v_0.Args[0] 13374 v_1 := v.Args[1] 13375 if v_1.Op != OpAMD64SHLLconst { 13376 break 13377 } 13378 c := v_1.AuxInt 13379 if x != v_1.Args[0] { 13380 break 13381 } 13382 if !(d == 32-c) { 13383 break 13384 } 13385 v.reset(OpAMD64ROLLconst) 13386 v.AuxInt = c 13387 v.AddArg(x) 13388 return true 13389 } 13390 // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 13391 // cond: d==16-c && c < 16 && t.Size() == 2 13392 // result: (ROLWconst x [c]) 13393 for { 13394 t := v.Type 13395 _ = v.Args[1] 13396 v_0 := v.Args[0] 13397 if v_0.Op != OpAMD64SHLLconst { 13398 break 13399 } 13400 c := v_0.AuxInt 13401 x := v_0.Args[0] 13402 v_1 := v.Args[1] 13403 if v_1.Op != OpAMD64SHRWconst { 13404 break 13405 } 13406 d := v_1.AuxInt 13407 if x != v_1.Args[0] { 13408 break 13409 } 13410 if !(d == 16-c && c < 16 && t.Size() == 2) { 13411 break 13412 } 13413 v.reset(OpAMD64ROLWconst) 13414 v.AuxInt = c 13415 v.AddArg(x) 13416 return true 13417 } 13418 // match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 13419 // cond: d==16-c && c < 16 && t.Size() == 2 13420 // result: (ROLWconst x [c]) 13421 for { 13422 t := v.Type 13423 _ = v.Args[1] 13424 v_0 := v.Args[0] 13425 if v_0.Op != OpAMD64SHRWconst { 13426 break 13427 } 13428 d := v_0.AuxInt 13429 x := v_0.Args[0] 13430 v_1 := v.Args[1] 13431 if v_1.Op != OpAMD64SHLLconst { 13432 break 13433 } 13434 c := v_1.AuxInt 13435 if x != v_1.Args[0] { 13436 break 13437 } 13438 if !(d == 16-c && c < 16 && t.Size() == 2) { 13439 break 13440 } 13441 v.reset(OpAMD64ROLWconst) 13442 v.AuxInt = c 13443 v.AddArg(x) 13444 return true 13445 } 13446 // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 13447 // cond: d==8-c && c < 8 && t.Size() == 1 13448 // result: (ROLBconst x [c]) 13449 for { 13450 t := v.Type 13451 _ = v.Args[1] 13452 v_0 := v.Args[0] 13453 if v_0.Op != OpAMD64SHLLconst { 13454 break 13455 } 13456 c := v_0.AuxInt 13457 x := v_0.Args[0] 13458 v_1 := v.Args[1] 13459 if v_1.Op != OpAMD64SHRBconst { 13460 break 13461 } 13462 d := v_1.AuxInt 13463 if x != v_1.Args[0] { 13464 break 13465 } 13466 if !(d == 8-c && c < 8 && t.Size() == 1) { 13467 break 13468 } 13469 v.reset(OpAMD64ROLBconst) 13470 v.AuxInt = c 13471 v.AddArg(x) 13472 return true 13473 } 13474 // match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 13475 // cond: d==8-c && c < 8 && t.Size() == 1 13476 // result: (ROLBconst x [c]) 13477 for { 13478 t := v.Type 13479 _ = v.Args[1] 13480 v_0 := v.Args[0] 13481 if v_0.Op != OpAMD64SHRBconst { 13482 break 13483 } 13484 d := v_0.AuxInt 13485 x := v_0.Args[0] 13486 v_1 := v.Args[1] 13487 if v_1.Op != OpAMD64SHLLconst { 13488 break 13489 } 13490 c := v_1.AuxInt 13491 if x != v_1.Args[0] { 13492 break 13493 } 13494 if !(d == 8-c && c < 8 && t.Size() == 1) { 13495 break 13496 } 13497 v.reset(OpAMD64ROLBconst) 13498 v.AuxInt = c 13499 v.AddArg(x) 13500 return true 13501 } 13502 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) 
(SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 13503 // cond: 13504 // result: (ROLL x y) 13505 for { 13506 _ = v.Args[1] 13507 v_0 := v.Args[0] 13508 if v_0.Op != OpAMD64SHLL { 13509 break 13510 } 13511 _ = v_0.Args[1] 13512 x := v_0.Args[0] 13513 y := v_0.Args[1] 13514 v_1 := v.Args[1] 13515 if v_1.Op != OpAMD64ANDL { 13516 break 13517 } 13518 _ = v_1.Args[1] 13519 v_1_0 := v_1.Args[0] 13520 if v_1_0.Op != OpAMD64SHRL { 13521 break 13522 } 13523 _ = v_1_0.Args[1] 13524 if x != v_1_0.Args[0] { 13525 break 13526 } 13527 v_1_0_1 := v_1_0.Args[1] 13528 if v_1_0_1.Op != OpAMD64NEGQ { 13529 break 13530 } 13531 if y != v_1_0_1.Args[0] { 13532 break 13533 } 13534 v_1_1 := v_1.Args[1] 13535 if v_1_1.Op != OpAMD64SBBLcarrymask { 13536 break 13537 } 13538 v_1_1_0 := v_1_1.Args[0] 13539 if v_1_1_0.Op != OpAMD64CMPQconst { 13540 break 13541 } 13542 if v_1_1_0.AuxInt != 32 { 13543 break 13544 } 13545 v_1_1_0_0 := v_1_1_0.Args[0] 13546 if v_1_1_0_0.Op != OpAMD64NEGQ { 13547 break 13548 } 13549 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 13550 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 13551 break 13552 } 13553 if v_1_1_0_0_0.AuxInt != -32 { 13554 break 13555 } 13556 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 13557 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 13558 break 13559 } 13560 if v_1_1_0_0_0_0.AuxInt != 31 { 13561 break 13562 } 13563 if y != v_1_1_0_0_0_0.Args[0] { 13564 break 13565 } 13566 v.reset(OpAMD64ROLL) 13567 v.AddArg(x) 13568 v.AddArg(y) 13569 return true 13570 } 13571 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y)))) 13572 // cond: 13573 // result: (ROLL x y) 13574 for { 13575 _ = v.Args[1] 13576 v_0 := v.Args[0] 13577 if v_0.Op != OpAMD64SHLL { 13578 break 13579 } 13580 _ = v_0.Args[1] 13581 x := v_0.Args[0] 13582 y := v_0.Args[1] 13583 v_1 := v.Args[1] 13584 if v_1.Op != OpAMD64ANDL { 13585 break 13586 } 13587 _ = v_1.Args[1] 13588 v_1_0 := v_1.Args[0] 13589 if v_1_0.Op != OpAMD64SBBLcarrymask { 13590 break 13591 } 13592 v_1_0_0 := v_1_0.Args[0] 13593 if v_1_0_0.Op != OpAMD64CMPQconst { 13594 break 13595 } 13596 if v_1_0_0.AuxInt != 32 { 13597 break 13598 } 13599 v_1_0_0_0 := v_1_0_0.Args[0] 13600 if v_1_0_0_0.Op != OpAMD64NEGQ { 13601 break 13602 } 13603 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 13604 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 13605 break 13606 } 13607 if v_1_0_0_0_0.AuxInt != -32 { 13608 break 13609 } 13610 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 13611 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 13612 break 13613 } 13614 if v_1_0_0_0_0_0.AuxInt != 31 { 13615 break 13616 } 13617 if y != v_1_0_0_0_0_0.Args[0] { 13618 break 13619 } 13620 v_1_1 := v_1.Args[1] 13621 if v_1_1.Op != OpAMD64SHRL { 13622 break 13623 } 13624 _ = v_1_1.Args[1] 13625 if x != v_1_1.Args[0] { 13626 break 13627 } 13628 v_1_1_1 := v_1_1.Args[1] 13629 if v_1_1_1.Op != OpAMD64NEGQ { 13630 break 13631 } 13632 if y != v_1_1_1.Args[0] { 13633 break 13634 } 13635 v.reset(OpAMD64ROLL) 13636 v.AddArg(x) 13637 v.AddArg(y) 13638 return true 13639 } 13640 return false 13641 } 13642 func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool { 13643 // match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y)) 13644 // cond: 13645 // result: (ROLL x y) 13646 for { 13647 _ = v.Args[1] 13648 v_0 := v.Args[0] 13649 if v_0.Op != OpAMD64ANDL { 13650 break 13651 } 13652 _ = v_0.Args[1] 13653 v_0_0 := v_0.Args[0] 13654 if v_0_0.Op != OpAMD64SHRL { 13655 break 13656 } 13657 _ = 
v_0_0.Args[1] 13658 x := v_0_0.Args[0] 13659 v_0_0_1 := v_0_0.Args[1] 13660 if v_0_0_1.Op != OpAMD64NEGQ { 13661 break 13662 } 13663 y := v_0_0_1.Args[0] 13664 v_0_1 := v_0.Args[1] 13665 if v_0_1.Op != OpAMD64SBBLcarrymask { 13666 break 13667 } 13668 v_0_1_0 := v_0_1.Args[0] 13669 if v_0_1_0.Op != OpAMD64CMPQconst { 13670 break 13671 } 13672 if v_0_1_0.AuxInt != 32 { 13673 break 13674 } 13675 v_0_1_0_0 := v_0_1_0.Args[0] 13676 if v_0_1_0_0.Op != OpAMD64NEGQ { 13677 break 13678 } 13679 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 13680 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 13681 break 13682 } 13683 if v_0_1_0_0_0.AuxInt != -32 { 13684 break 13685 } 13686 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 13687 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 13688 break 13689 } 13690 if v_0_1_0_0_0_0.AuxInt != 31 { 13691 break 13692 } 13693 if y != v_0_1_0_0_0_0.Args[0] { 13694 break 13695 } 13696 v_1 := v.Args[1] 13697 if v_1.Op != OpAMD64SHLL { 13698 break 13699 } 13700 _ = v_1.Args[1] 13701 if x != v_1.Args[0] { 13702 break 13703 } 13704 if y != v_1.Args[1] { 13705 break 13706 } 13707 v.reset(OpAMD64ROLL) 13708 v.AddArg(x) 13709 v.AddArg(y) 13710 return true 13711 } 13712 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y)) 13713 // cond: 13714 // result: (ROLL x y) 13715 for { 13716 _ = v.Args[1] 13717 v_0 := v.Args[0] 13718 if v_0.Op != OpAMD64ANDL { 13719 break 13720 } 13721 _ = v_0.Args[1] 13722 v_0_0 := v_0.Args[0] 13723 if v_0_0.Op != OpAMD64SBBLcarrymask { 13724 break 13725 } 13726 v_0_0_0 := v_0_0.Args[0] 13727 if v_0_0_0.Op != OpAMD64CMPQconst { 13728 break 13729 } 13730 if v_0_0_0.AuxInt != 32 { 13731 break 13732 } 13733 v_0_0_0_0 := v_0_0_0.Args[0] 13734 if v_0_0_0_0.Op != OpAMD64NEGQ { 13735 break 13736 } 13737 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 13738 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 13739 break 13740 } 13741 if v_0_0_0_0_0.AuxInt != -32 { 13742 break 13743 } 13744 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 13745 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 13746 break 13747 } 13748 if v_0_0_0_0_0_0.AuxInt != 31 { 13749 break 13750 } 13751 y := v_0_0_0_0_0_0.Args[0] 13752 v_0_1 := v_0.Args[1] 13753 if v_0_1.Op != OpAMD64SHRL { 13754 break 13755 } 13756 _ = v_0_1.Args[1] 13757 x := v_0_1.Args[0] 13758 v_0_1_1 := v_0_1.Args[1] 13759 if v_0_1_1.Op != OpAMD64NEGQ { 13760 break 13761 } 13762 if y != v_0_1_1.Args[0] { 13763 break 13764 } 13765 v_1 := v.Args[1] 13766 if v_1.Op != OpAMD64SHLL { 13767 break 13768 } 13769 _ = v_1.Args[1] 13770 if x != v_1.Args[0] { 13771 break 13772 } 13773 if y != v_1.Args[1] { 13774 break 13775 } 13776 v.reset(OpAMD64ROLL) 13777 v.AddArg(x) 13778 v.AddArg(y) 13779 return true 13780 } 13781 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 13782 // cond: 13783 // result: (ROLL x y) 13784 for { 13785 _ = v.Args[1] 13786 v_0 := v.Args[0] 13787 if v_0.Op != OpAMD64SHLL { 13788 break 13789 } 13790 _ = v_0.Args[1] 13791 x := v_0.Args[0] 13792 y := v_0.Args[1] 13793 v_1 := v.Args[1] 13794 if v_1.Op != OpAMD64ANDL { 13795 break 13796 } 13797 _ = v_1.Args[1] 13798 v_1_0 := v_1.Args[0] 13799 if v_1_0.Op != OpAMD64SHRL { 13800 break 13801 } 13802 _ = v_1_0.Args[1] 13803 if x != v_1_0.Args[0] { 13804 break 13805 } 13806 v_1_0_1 := v_1_0.Args[1] 13807 if v_1_0_1.Op != OpAMD64NEGL { 13808 break 13809 } 13810 if y != v_1_0_1.Args[0] { 13811 break 13812 } 13813 v_1_1 := v_1.Args[1] 13814 if v_1_1.Op != OpAMD64SBBLcarrymask { 13815 break 13816 } 
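// The SBBLcarrymask subtree matched next is the mask from the generic
// rotate lowering: CMPLconst [32] of 32-(y&31) sets the carry flag exactly
// when y&31 != 0, so the mask is all ones then and zero when y&31 == 0.
// That cancels the x >> ((32-y)&31) term precisely when the hardware
// shift count would wrap around to zero, making the whole tree a true
// 32-bit rotate that can collapse to a single ROLL.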
13817 v_1_1_0 := v_1_1.Args[0] 13818 if v_1_1_0.Op != OpAMD64CMPLconst { 13819 break 13820 } 13821 if v_1_1_0.AuxInt != 32 { 13822 break 13823 } 13824 v_1_1_0_0 := v_1_1_0.Args[0] 13825 if v_1_1_0_0.Op != OpAMD64NEGL { 13826 break 13827 } 13828 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 13829 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 13830 break 13831 } 13832 if v_1_1_0_0_0.AuxInt != -32 { 13833 break 13834 } 13835 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 13836 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 13837 break 13838 } 13839 if v_1_1_0_0_0_0.AuxInt != 31 { 13840 break 13841 } 13842 if y != v_1_1_0_0_0_0.Args[0] { 13843 break 13844 } 13845 v.reset(OpAMD64ROLL) 13846 v.AddArg(x) 13847 v.AddArg(y) 13848 return true 13849 } 13850 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y)))) 13851 // cond: 13852 // result: (ROLL x y) 13853 for { 13854 _ = v.Args[1] 13855 v_0 := v.Args[0] 13856 if v_0.Op != OpAMD64SHLL { 13857 break 13858 } 13859 _ = v_0.Args[1] 13860 x := v_0.Args[0] 13861 y := v_0.Args[1] 13862 v_1 := v.Args[1] 13863 if v_1.Op != OpAMD64ANDL { 13864 break 13865 } 13866 _ = v_1.Args[1] 13867 v_1_0 := v_1.Args[0] 13868 if v_1_0.Op != OpAMD64SBBLcarrymask { 13869 break 13870 } 13871 v_1_0_0 := v_1_0.Args[0] 13872 if v_1_0_0.Op != OpAMD64CMPLconst { 13873 break 13874 } 13875 if v_1_0_0.AuxInt != 32 { 13876 break 13877 } 13878 v_1_0_0_0 := v_1_0_0.Args[0] 13879 if v_1_0_0_0.Op != OpAMD64NEGL { 13880 break 13881 } 13882 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 13883 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 13884 break 13885 } 13886 if v_1_0_0_0_0.AuxInt != -32 { 13887 break 13888 } 13889 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 13890 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 13891 break 13892 } 13893 if v_1_0_0_0_0_0.AuxInt != 31 { 13894 break 13895 } 13896 if y != v_1_0_0_0_0_0.Args[0] { 13897 break 13898 } 13899 v_1_1 := v_1.Args[1] 13900 if v_1_1.Op != OpAMD64SHRL { 13901 break 13902 } 13903 _ = v_1_1.Args[1] 13904 if x != v_1_1.Args[0] { 13905 break 13906 } 13907 v_1_1_1 := v_1_1.Args[1] 13908 if v_1_1_1.Op != OpAMD64NEGL { 13909 break 13910 } 13911 if y != v_1_1_1.Args[0] { 13912 break 13913 } 13914 v.reset(OpAMD64ROLL) 13915 v.AddArg(x) 13916 v.AddArg(y) 13917 return true 13918 } 13919 // match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y)) 13920 // cond: 13921 // result: (ROLL x y) 13922 for { 13923 _ = v.Args[1] 13924 v_0 := v.Args[0] 13925 if v_0.Op != OpAMD64ANDL { 13926 break 13927 } 13928 _ = v_0.Args[1] 13929 v_0_0 := v_0.Args[0] 13930 if v_0_0.Op != OpAMD64SHRL { 13931 break 13932 } 13933 _ = v_0_0.Args[1] 13934 x := v_0_0.Args[0] 13935 v_0_0_1 := v_0_0.Args[1] 13936 if v_0_0_1.Op != OpAMD64NEGL { 13937 break 13938 } 13939 y := v_0_0_1.Args[0] 13940 v_0_1 := v_0.Args[1] 13941 if v_0_1.Op != OpAMD64SBBLcarrymask { 13942 break 13943 } 13944 v_0_1_0 := v_0_1.Args[0] 13945 if v_0_1_0.Op != OpAMD64CMPLconst { 13946 break 13947 } 13948 if v_0_1_0.AuxInt != 32 { 13949 break 13950 } 13951 v_0_1_0_0 := v_0_1_0.Args[0] 13952 if v_0_1_0_0.Op != OpAMD64NEGL { 13953 break 13954 } 13955 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 13956 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 13957 break 13958 } 13959 if v_0_1_0_0_0.AuxInt != -32 { 13960 break 13961 } 13962 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 13963 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 13964 break 13965 } 13966 if v_0_1_0_0_0_0.AuxInt != 31 { 13967 break 13968 } 13969 if y != v_0_1_0_0_0_0.Args[0] { 13970 break 13971 } 13972 
v_1 := v.Args[1] 13973 if v_1.Op != OpAMD64SHLL { 13974 break 13975 } 13976 _ = v_1.Args[1] 13977 if x != v_1.Args[0] { 13978 break 13979 } 13980 if y != v_1.Args[1] { 13981 break 13982 } 13983 v.reset(OpAMD64ROLL) 13984 v.AddArg(x) 13985 v.AddArg(y) 13986 return true 13987 } 13988 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y)) 13989 // cond: 13990 // result: (ROLL x y) 13991 for { 13992 _ = v.Args[1] 13993 v_0 := v.Args[0] 13994 if v_0.Op != OpAMD64ANDL { 13995 break 13996 } 13997 _ = v_0.Args[1] 13998 v_0_0 := v_0.Args[0] 13999 if v_0_0.Op != OpAMD64SBBLcarrymask { 14000 break 14001 } 14002 v_0_0_0 := v_0_0.Args[0] 14003 if v_0_0_0.Op != OpAMD64CMPLconst { 14004 break 14005 } 14006 if v_0_0_0.AuxInt != 32 { 14007 break 14008 } 14009 v_0_0_0_0 := v_0_0_0.Args[0] 14010 if v_0_0_0_0.Op != OpAMD64NEGL { 14011 break 14012 } 14013 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 14014 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 14015 break 14016 } 14017 if v_0_0_0_0_0.AuxInt != -32 { 14018 break 14019 } 14020 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 14021 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 14022 break 14023 } 14024 if v_0_0_0_0_0_0.AuxInt != 31 { 14025 break 14026 } 14027 y := v_0_0_0_0_0_0.Args[0] 14028 v_0_1 := v_0.Args[1] 14029 if v_0_1.Op != OpAMD64SHRL { 14030 break 14031 } 14032 _ = v_0_1.Args[1] 14033 x := v_0_1.Args[0] 14034 v_0_1_1 := v_0_1.Args[1] 14035 if v_0_1_1.Op != OpAMD64NEGL { 14036 break 14037 } 14038 if y != v_0_1_1.Args[0] { 14039 break 14040 } 14041 v_1 := v.Args[1] 14042 if v_1.Op != OpAMD64SHLL { 14043 break 14044 } 14045 _ = v_1.Args[1] 14046 if x != v_1.Args[0] { 14047 break 14048 } 14049 if y != v_1.Args[1] { 14050 break 14051 } 14052 v.reset(OpAMD64ROLL) 14053 v.AddArg(x) 14054 v.AddArg(y) 14055 return true 14056 } 14057 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 14058 // cond: 14059 // result: (RORL x y) 14060 for { 14061 _ = v.Args[1] 14062 v_0 := v.Args[0] 14063 if v_0.Op != OpAMD64SHRL { 14064 break 14065 } 14066 _ = v_0.Args[1] 14067 x := v_0.Args[0] 14068 y := v_0.Args[1] 14069 v_1 := v.Args[1] 14070 if v_1.Op != OpAMD64ANDL { 14071 break 14072 } 14073 _ = v_1.Args[1] 14074 v_1_0 := v_1.Args[0] 14075 if v_1_0.Op != OpAMD64SHLL { 14076 break 14077 } 14078 _ = v_1_0.Args[1] 14079 if x != v_1_0.Args[0] { 14080 break 14081 } 14082 v_1_0_1 := v_1_0.Args[1] 14083 if v_1_0_1.Op != OpAMD64NEGQ { 14084 break 14085 } 14086 if y != v_1_0_1.Args[0] { 14087 break 14088 } 14089 v_1_1 := v_1.Args[1] 14090 if v_1_1.Op != OpAMD64SBBLcarrymask { 14091 break 14092 } 14093 v_1_1_0 := v_1_1.Args[0] 14094 if v_1_1_0.Op != OpAMD64CMPQconst { 14095 break 14096 } 14097 if v_1_1_0.AuxInt != 32 { 14098 break 14099 } 14100 v_1_1_0_0 := v_1_1_0.Args[0] 14101 if v_1_1_0_0.Op != OpAMD64NEGQ { 14102 break 14103 } 14104 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 14105 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 14106 break 14107 } 14108 if v_1_1_0_0_0.AuxInt != -32 { 14109 break 14110 } 14111 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 14112 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 14113 break 14114 } 14115 if v_1_1_0_0_0_0.AuxInt != 31 { 14116 break 14117 } 14118 if y != v_1_1_0_0_0_0.Args[0] { 14119 break 14120 } 14121 v.reset(OpAMD64RORL) 14122 v.AddArg(x) 14123 v.AddArg(y) 14124 return true 14125 } 14126 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y)))) 14127 // cond: 14128 // 
result: (RORL x y) 14129 for { 14130 _ = v.Args[1] 14131 v_0 := v.Args[0] 14132 if v_0.Op != OpAMD64SHRL { 14133 break 14134 } 14135 _ = v_0.Args[1] 14136 x := v_0.Args[0] 14137 y := v_0.Args[1] 14138 v_1 := v.Args[1] 14139 if v_1.Op != OpAMD64ANDL { 14140 break 14141 } 14142 _ = v_1.Args[1] 14143 v_1_0 := v_1.Args[0] 14144 if v_1_0.Op != OpAMD64SBBLcarrymask { 14145 break 14146 } 14147 v_1_0_0 := v_1_0.Args[0] 14148 if v_1_0_0.Op != OpAMD64CMPQconst { 14149 break 14150 } 14151 if v_1_0_0.AuxInt != 32 { 14152 break 14153 } 14154 v_1_0_0_0 := v_1_0_0.Args[0] 14155 if v_1_0_0_0.Op != OpAMD64NEGQ { 14156 break 14157 } 14158 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 14159 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 14160 break 14161 } 14162 if v_1_0_0_0_0.AuxInt != -32 { 14163 break 14164 } 14165 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 14166 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 14167 break 14168 } 14169 if v_1_0_0_0_0_0.AuxInt != 31 { 14170 break 14171 } 14172 if y != v_1_0_0_0_0_0.Args[0] { 14173 break 14174 } 14175 v_1_1 := v_1.Args[1] 14176 if v_1_1.Op != OpAMD64SHLL { 14177 break 14178 } 14179 _ = v_1_1.Args[1] 14180 if x != v_1_1.Args[0] { 14181 break 14182 } 14183 v_1_1_1 := v_1_1.Args[1] 14184 if v_1_1_1.Op != OpAMD64NEGQ { 14185 break 14186 } 14187 if y != v_1_1_1.Args[0] { 14188 break 14189 } 14190 v.reset(OpAMD64RORL) 14191 v.AddArg(x) 14192 v.AddArg(y) 14193 return true 14194 } 14195 // match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y)) 14196 // cond: 14197 // result: (RORL x y) 14198 for { 14199 _ = v.Args[1] 14200 v_0 := v.Args[0] 14201 if v_0.Op != OpAMD64ANDL { 14202 break 14203 } 14204 _ = v_0.Args[1] 14205 v_0_0 := v_0.Args[0] 14206 if v_0_0.Op != OpAMD64SHLL { 14207 break 14208 } 14209 _ = v_0_0.Args[1] 14210 x := v_0_0.Args[0] 14211 v_0_0_1 := v_0_0.Args[1] 14212 if v_0_0_1.Op != OpAMD64NEGQ { 14213 break 14214 } 14215 y := v_0_0_1.Args[0] 14216 v_0_1 := v_0.Args[1] 14217 if v_0_1.Op != OpAMD64SBBLcarrymask { 14218 break 14219 } 14220 v_0_1_0 := v_0_1.Args[0] 14221 if v_0_1_0.Op != OpAMD64CMPQconst { 14222 break 14223 } 14224 if v_0_1_0.AuxInt != 32 { 14225 break 14226 } 14227 v_0_1_0_0 := v_0_1_0.Args[0] 14228 if v_0_1_0_0.Op != OpAMD64NEGQ { 14229 break 14230 } 14231 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 14232 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 14233 break 14234 } 14235 if v_0_1_0_0_0.AuxInt != -32 { 14236 break 14237 } 14238 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 14239 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 14240 break 14241 } 14242 if v_0_1_0_0_0_0.AuxInt != 31 { 14243 break 14244 } 14245 if y != v_0_1_0_0_0_0.Args[0] { 14246 break 14247 } 14248 v_1 := v.Args[1] 14249 if v_1.Op != OpAMD64SHRL { 14250 break 14251 } 14252 _ = v_1.Args[1] 14253 if x != v_1.Args[0] { 14254 break 14255 } 14256 if y != v_1.Args[1] { 14257 break 14258 } 14259 v.reset(OpAMD64RORL) 14260 v.AddArg(x) 14261 v.AddArg(y) 14262 return true 14263 } 14264 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y)) 14265 // cond: 14266 // result: (RORL x y) 14267 for { 14268 _ = v.Args[1] 14269 v_0 := v.Args[0] 14270 if v_0.Op != OpAMD64ANDL { 14271 break 14272 } 14273 _ = v_0.Args[1] 14274 v_0_0 := v_0.Args[0] 14275 if v_0_0.Op != OpAMD64SBBLcarrymask { 14276 break 14277 } 14278 v_0_0_0 := v_0_0.Args[0] 14279 if v_0_0_0.Op != OpAMD64CMPQconst { 14280 break 14281 } 14282 if v_0_0_0.AuxInt != 32 { 14283 break 14284 } 14285 v_0_0_0_0 := v_0_0_0.Args[0] 14286 if v_0_0_0_0.Op 
!= OpAMD64NEGQ { 14287 break 14288 } 14289 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 14290 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 14291 break 14292 } 14293 if v_0_0_0_0_0.AuxInt != -32 { 14294 break 14295 } 14296 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 14297 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 14298 break 14299 } 14300 if v_0_0_0_0_0_0.AuxInt != 31 { 14301 break 14302 } 14303 y := v_0_0_0_0_0_0.Args[0] 14304 v_0_1 := v_0.Args[1] 14305 if v_0_1.Op != OpAMD64SHLL { 14306 break 14307 } 14308 _ = v_0_1.Args[1] 14309 x := v_0_1.Args[0] 14310 v_0_1_1 := v_0_1.Args[1] 14311 if v_0_1_1.Op != OpAMD64NEGQ { 14312 break 14313 } 14314 if y != v_0_1_1.Args[0] { 14315 break 14316 } 14317 v_1 := v.Args[1] 14318 if v_1.Op != OpAMD64SHRL { 14319 break 14320 } 14321 _ = v_1.Args[1] 14322 if x != v_1.Args[0] { 14323 break 14324 } 14325 if y != v_1.Args[1] { 14326 break 14327 } 14328 v.reset(OpAMD64RORL) 14329 v.AddArg(x) 14330 v.AddArg(y) 14331 return true 14332 } 14333 return false 14334 } 14335 func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { 14336 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 14337 // cond: 14338 // result: (RORL x y) 14339 for { 14340 _ = v.Args[1] 14341 v_0 := v.Args[0] 14342 if v_0.Op != OpAMD64SHRL { 14343 break 14344 } 14345 _ = v_0.Args[1] 14346 x := v_0.Args[0] 14347 y := v_0.Args[1] 14348 v_1 := v.Args[1] 14349 if v_1.Op != OpAMD64ANDL { 14350 break 14351 } 14352 _ = v_1.Args[1] 14353 v_1_0 := v_1.Args[0] 14354 if v_1_0.Op != OpAMD64SHLL { 14355 break 14356 } 14357 _ = v_1_0.Args[1] 14358 if x != v_1_0.Args[0] { 14359 break 14360 } 14361 v_1_0_1 := v_1_0.Args[1] 14362 if v_1_0_1.Op != OpAMD64NEGL { 14363 break 14364 } 14365 if y != v_1_0_1.Args[0] { 14366 break 14367 } 14368 v_1_1 := v_1.Args[1] 14369 if v_1_1.Op != OpAMD64SBBLcarrymask { 14370 break 14371 } 14372 v_1_1_0 := v_1_1.Args[0] 14373 if v_1_1_0.Op != OpAMD64CMPLconst { 14374 break 14375 } 14376 if v_1_1_0.AuxInt != 32 { 14377 break 14378 } 14379 v_1_1_0_0 := v_1_1_0.Args[0] 14380 if v_1_1_0_0.Op != OpAMD64NEGL { 14381 break 14382 } 14383 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 14384 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 14385 break 14386 } 14387 if v_1_1_0_0_0.AuxInt != -32 { 14388 break 14389 } 14390 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 14391 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 14392 break 14393 } 14394 if v_1_1_0_0_0_0.AuxInt != 31 { 14395 break 14396 } 14397 if y != v_1_1_0_0_0_0.Args[0] { 14398 break 14399 } 14400 v.reset(OpAMD64RORL) 14401 v.AddArg(x) 14402 v.AddArg(y) 14403 return true 14404 } 14405 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y)))) 14406 // cond: 14407 // result: (RORL x y) 14408 for { 14409 _ = v.Args[1] 14410 v_0 := v.Args[0] 14411 if v_0.Op != OpAMD64SHRL { 14412 break 14413 } 14414 _ = v_0.Args[1] 14415 x := v_0.Args[0] 14416 y := v_0.Args[1] 14417 v_1 := v.Args[1] 14418 if v_1.Op != OpAMD64ANDL { 14419 break 14420 } 14421 _ = v_1.Args[1] 14422 v_1_0 := v_1.Args[0] 14423 if v_1_0.Op != OpAMD64SBBLcarrymask { 14424 break 14425 } 14426 v_1_0_0 := v_1_0.Args[0] 14427 if v_1_0_0.Op != OpAMD64CMPLconst { 14428 break 14429 } 14430 if v_1_0_0.AuxInt != 32 { 14431 break 14432 } 14433 v_1_0_0_0 := v_1_0_0.Args[0] 14434 if v_1_0_0_0.Op != OpAMD64NEGL { 14435 break 14436 } 14437 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 14438 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 14439 break 14440 } 14441 if v_1_0_0_0_0.AuxInt != -32 { 14442 break 14443 } 14444 
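// The RORL patterns in this function mirror the ROLL patterns above with
// the shift directions swapped: the primary term is x >> y and the masked
// correction term is x << ((32-y)&31), so the same ANDLconst [31] /
// ADDLconst [-32] / NEGL skeleton is matched, here hanging off the SHLL
// operand instead of the SHRL one.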
v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 14445 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 14446 break 14447 } 14448 if v_1_0_0_0_0_0.AuxInt != 31 { 14449 break 14450 } 14451 if y != v_1_0_0_0_0_0.Args[0] { 14452 break 14453 } 14454 v_1_1 := v_1.Args[1] 14455 if v_1_1.Op != OpAMD64SHLL { 14456 break 14457 } 14458 _ = v_1_1.Args[1] 14459 if x != v_1_1.Args[0] { 14460 break 14461 } 14462 v_1_1_1 := v_1_1.Args[1] 14463 if v_1_1_1.Op != OpAMD64NEGL { 14464 break 14465 } 14466 if y != v_1_1_1.Args[0] { 14467 break 14468 } 14469 v.reset(OpAMD64RORL) 14470 v.AddArg(x) 14471 v.AddArg(y) 14472 return true 14473 } 14474 // match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y)) 14475 // cond: 14476 // result: (RORL x y) 14477 for { 14478 _ = v.Args[1] 14479 v_0 := v.Args[0] 14480 if v_0.Op != OpAMD64ANDL { 14481 break 14482 } 14483 _ = v_0.Args[1] 14484 v_0_0 := v_0.Args[0] 14485 if v_0_0.Op != OpAMD64SHLL { 14486 break 14487 } 14488 _ = v_0_0.Args[1] 14489 x := v_0_0.Args[0] 14490 v_0_0_1 := v_0_0.Args[1] 14491 if v_0_0_1.Op != OpAMD64NEGL { 14492 break 14493 } 14494 y := v_0_0_1.Args[0] 14495 v_0_1 := v_0.Args[1] 14496 if v_0_1.Op != OpAMD64SBBLcarrymask { 14497 break 14498 } 14499 v_0_1_0 := v_0_1.Args[0] 14500 if v_0_1_0.Op != OpAMD64CMPLconst { 14501 break 14502 } 14503 if v_0_1_0.AuxInt != 32 { 14504 break 14505 } 14506 v_0_1_0_0 := v_0_1_0.Args[0] 14507 if v_0_1_0_0.Op != OpAMD64NEGL { 14508 break 14509 } 14510 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 14511 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 14512 break 14513 } 14514 if v_0_1_0_0_0.AuxInt != -32 { 14515 break 14516 } 14517 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 14518 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 14519 break 14520 } 14521 if v_0_1_0_0_0_0.AuxInt != 31 { 14522 break 14523 } 14524 if y != v_0_1_0_0_0_0.Args[0] { 14525 break 14526 } 14527 v_1 := v.Args[1] 14528 if v_1.Op != OpAMD64SHRL { 14529 break 14530 } 14531 _ = v_1.Args[1] 14532 if x != v_1.Args[0] { 14533 break 14534 } 14535 if y != v_1.Args[1] { 14536 break 14537 } 14538 v.reset(OpAMD64RORL) 14539 v.AddArg(x) 14540 v.AddArg(y) 14541 return true 14542 } 14543 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y)) 14544 // cond: 14545 // result: (RORL x y) 14546 for { 14547 _ = v.Args[1] 14548 v_0 := v.Args[0] 14549 if v_0.Op != OpAMD64ANDL { 14550 break 14551 } 14552 _ = v_0.Args[1] 14553 v_0_0 := v_0.Args[0] 14554 if v_0_0.Op != OpAMD64SBBLcarrymask { 14555 break 14556 } 14557 v_0_0_0 := v_0_0.Args[0] 14558 if v_0_0_0.Op != OpAMD64CMPLconst { 14559 break 14560 } 14561 if v_0_0_0.AuxInt != 32 { 14562 break 14563 } 14564 v_0_0_0_0 := v_0_0_0.Args[0] 14565 if v_0_0_0_0.Op != OpAMD64NEGL { 14566 break 14567 } 14568 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 14569 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 14570 break 14571 } 14572 if v_0_0_0_0_0.AuxInt != -32 { 14573 break 14574 } 14575 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 14576 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 14577 break 14578 } 14579 if v_0_0_0_0_0_0.AuxInt != 31 { 14580 break 14581 } 14582 y := v_0_0_0_0_0_0.Args[0] 14583 v_0_1 := v_0.Args[1] 14584 if v_0_1.Op != OpAMD64SHLL { 14585 break 14586 } 14587 _ = v_0_1.Args[1] 14588 x := v_0_1.Args[0] 14589 v_0_1_1 := v_0_1.Args[1] 14590 if v_0_1_1.Op != OpAMD64NEGL { 14591 break 14592 } 14593 if y != v_0_1_1.Args[0] { 14594 break 14595 } 14596 v_1 := v.Args[1] 14597 if v_1.Op != OpAMD64SHRL { 14598 break 14599 } 14600 _ = v_1.Args[1] 14601 if x != 
v_1.Args[0] { 14602 break 14603 } 14604 if y != v_1.Args[1] { 14605 break 14606 } 14607 v.reset(OpAMD64RORL) 14608 v.AddArg(x) 14609 v.AddArg(y) 14610 return true 14611 } 14612 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) 14613 // cond: v.Type.Size() == 2 14614 // result: (ROLW x y) 14615 for { 14616 _ = v.Args[1] 14617 v_0 := v.Args[0] 14618 if v_0.Op != OpAMD64SHLL { 14619 break 14620 } 14621 _ = v_0.Args[1] 14622 x := v_0.Args[0] 14623 v_0_1 := v_0.Args[1] 14624 if v_0_1.Op != OpAMD64ANDQconst { 14625 break 14626 } 14627 if v_0_1.AuxInt != 15 { 14628 break 14629 } 14630 y := v_0_1.Args[0] 14631 v_1 := v.Args[1] 14632 if v_1.Op != OpAMD64ANDL { 14633 break 14634 } 14635 _ = v_1.Args[1] 14636 v_1_0 := v_1.Args[0] 14637 if v_1_0.Op != OpAMD64SHRW { 14638 break 14639 } 14640 _ = v_1_0.Args[1] 14641 if x != v_1_0.Args[0] { 14642 break 14643 } 14644 v_1_0_1 := v_1_0.Args[1] 14645 if v_1_0_1.Op != OpAMD64NEGQ { 14646 break 14647 } 14648 v_1_0_1_0 := v_1_0_1.Args[0] 14649 if v_1_0_1_0.Op != OpAMD64ADDQconst { 14650 break 14651 } 14652 if v_1_0_1_0.AuxInt != -16 { 14653 break 14654 } 14655 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 14656 if v_1_0_1_0_0.Op != OpAMD64ANDQconst { 14657 break 14658 } 14659 if v_1_0_1_0_0.AuxInt != 15 { 14660 break 14661 } 14662 if y != v_1_0_1_0_0.Args[0] { 14663 break 14664 } 14665 v_1_1 := v_1.Args[1] 14666 if v_1_1.Op != OpAMD64SBBLcarrymask { 14667 break 14668 } 14669 v_1_1_0 := v_1_1.Args[0] 14670 if v_1_1_0.Op != OpAMD64CMPQconst { 14671 break 14672 } 14673 if v_1_1_0.AuxInt != 16 { 14674 break 14675 } 14676 v_1_1_0_0 := v_1_1_0.Args[0] 14677 if v_1_1_0_0.Op != OpAMD64NEGQ { 14678 break 14679 } 14680 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 14681 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 14682 break 14683 } 14684 if v_1_1_0_0_0.AuxInt != -16 { 14685 break 14686 } 14687 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 14688 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 14689 break 14690 } 14691 if v_1_1_0_0_0_0.AuxInt != 15 { 14692 break 14693 } 14694 if y != v_1_1_0_0_0_0.Args[0] { 14695 break 14696 } 14697 if !(v.Type.Size() == 2) { 14698 break 14699 } 14700 v.reset(OpAMD64ROLW) 14701 v.AddArg(x) 14702 v.AddArg(y) 14703 return true 14704 } 14705 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))) 14706 // cond: v.Type.Size() == 2 14707 // result: (ROLW x y) 14708 for { 14709 _ = v.Args[1] 14710 v_0 := v.Args[0] 14711 if v_0.Op != OpAMD64SHLL { 14712 break 14713 } 14714 _ = v_0.Args[1] 14715 x := v_0.Args[0] 14716 v_0_1 := v_0.Args[1] 14717 if v_0_1.Op != OpAMD64ANDQconst { 14718 break 14719 } 14720 if v_0_1.AuxInt != 15 { 14721 break 14722 } 14723 y := v_0_1.Args[0] 14724 v_1 := v.Args[1] 14725 if v_1.Op != OpAMD64ANDL { 14726 break 14727 } 14728 _ = v_1.Args[1] 14729 v_1_0 := v_1.Args[0] 14730 if v_1_0.Op != OpAMD64SBBLcarrymask { 14731 break 14732 } 14733 v_1_0_0 := v_1_0.Args[0] 14734 if v_1_0_0.Op != OpAMD64CMPQconst { 14735 break 14736 } 14737 if v_1_0_0.AuxInt != 16 { 14738 break 14739 } 14740 v_1_0_0_0 := v_1_0_0.Args[0] 14741 if v_1_0_0_0.Op != OpAMD64NEGQ { 14742 break 14743 } 14744 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 14745 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 14746 break 14747 } 14748 if v_1_0_0_0_0.AuxInt != -16 { 14749 break 14750 } 14751 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 14752 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 
14753 break 14754 } 14755 if v_1_0_0_0_0_0.AuxInt != 15 { 14756 break 14757 } 14758 if y != v_1_0_0_0_0_0.Args[0] { 14759 break 14760 } 14761 v_1_1 := v_1.Args[1] 14762 if v_1_1.Op != OpAMD64SHRW { 14763 break 14764 } 14765 _ = v_1_1.Args[1] 14766 if x != v_1_1.Args[0] { 14767 break 14768 } 14769 v_1_1_1 := v_1_1.Args[1] 14770 if v_1_1_1.Op != OpAMD64NEGQ { 14771 break 14772 } 14773 v_1_1_1_0 := v_1_1_1.Args[0] 14774 if v_1_1_1_0.Op != OpAMD64ADDQconst { 14775 break 14776 } 14777 if v_1_1_1_0.AuxInt != -16 { 14778 break 14779 } 14780 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 14781 if v_1_1_1_0_0.Op != OpAMD64ANDQconst { 14782 break 14783 } 14784 if v_1_1_1_0_0.AuxInt != 15 { 14785 break 14786 } 14787 if y != v_1_1_1_0_0.Args[0] { 14788 break 14789 } 14790 if !(v.Type.Size() == 2) { 14791 break 14792 } 14793 v.reset(OpAMD64ROLW) 14794 v.AddArg(x) 14795 v.AddArg(y) 14796 return true 14797 } 14798 // match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15]))) 14799 // cond: v.Type.Size() == 2 14800 // result: (ROLW x y) 14801 for { 14802 _ = v.Args[1] 14803 v_0 := v.Args[0] 14804 if v_0.Op != OpAMD64ANDL { 14805 break 14806 } 14807 _ = v_0.Args[1] 14808 v_0_0 := v_0.Args[0] 14809 if v_0_0.Op != OpAMD64SHRW { 14810 break 14811 } 14812 _ = v_0_0.Args[1] 14813 x := v_0_0.Args[0] 14814 v_0_0_1 := v_0_0.Args[1] 14815 if v_0_0_1.Op != OpAMD64NEGQ { 14816 break 14817 } 14818 v_0_0_1_0 := v_0_0_1.Args[0] 14819 if v_0_0_1_0.Op != OpAMD64ADDQconst { 14820 break 14821 } 14822 if v_0_0_1_0.AuxInt != -16 { 14823 break 14824 } 14825 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 14826 if v_0_0_1_0_0.Op != OpAMD64ANDQconst { 14827 break 14828 } 14829 if v_0_0_1_0_0.AuxInt != 15 { 14830 break 14831 } 14832 y := v_0_0_1_0_0.Args[0] 14833 v_0_1 := v_0.Args[1] 14834 if v_0_1.Op != OpAMD64SBBLcarrymask { 14835 break 14836 } 14837 v_0_1_0 := v_0_1.Args[0] 14838 if v_0_1_0.Op != OpAMD64CMPQconst { 14839 break 14840 } 14841 if v_0_1_0.AuxInt != 16 { 14842 break 14843 } 14844 v_0_1_0_0 := v_0_1_0.Args[0] 14845 if v_0_1_0_0.Op != OpAMD64NEGQ { 14846 break 14847 } 14848 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 14849 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 14850 break 14851 } 14852 if v_0_1_0_0_0.AuxInt != -16 { 14853 break 14854 } 14855 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 14856 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 14857 break 14858 } 14859 if v_0_1_0_0_0_0.AuxInt != 15 { 14860 break 14861 } 14862 if y != v_0_1_0_0_0_0.Args[0] { 14863 break 14864 } 14865 v_1 := v.Args[1] 14866 if v_1.Op != OpAMD64SHLL { 14867 break 14868 } 14869 _ = v_1.Args[1] 14870 if x != v_1.Args[0] { 14871 break 14872 } 14873 v_1_1 := v_1.Args[1] 14874 if v_1_1.Op != OpAMD64ANDQconst { 14875 break 14876 } 14877 if v_1_1.AuxInt != 15 { 14878 break 14879 } 14880 if y != v_1_1.Args[0] { 14881 break 14882 } 14883 if !(v.Type.Size() == 2) { 14884 break 14885 } 14886 v.reset(OpAMD64ROLW) 14887 v.AddArg(x) 14888 v.AddArg(y) 14889 return true 14890 } 14891 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15]))) 14892 // cond: v.Type.Size() == 2 14893 // result: (ROLW x y) 14894 for { 14895 _ = v.Args[1] 14896 v_0 := v.Args[0] 14897 if v_0.Op != OpAMD64ANDL { 14898 break 14899 } 14900 _ = v_0.Args[1] 14901 v_0_0 := v_0.Args[0] 14902 if v_0_0.Op != OpAMD64SBBLcarrymask { 14903 break 14904 } 14905 v_0_0_0 := v_0_0.Args[0] 14906 if v_0_0_0.Op != 
OpAMD64CMPQconst { 14907 break 14908 } 14909 if v_0_0_0.AuxInt != 16 { 14910 break 14911 } 14912 v_0_0_0_0 := v_0_0_0.Args[0] 14913 if v_0_0_0_0.Op != OpAMD64NEGQ { 14914 break 14915 } 14916 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 14917 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 14918 break 14919 } 14920 if v_0_0_0_0_0.AuxInt != -16 { 14921 break 14922 } 14923 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 14924 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 14925 break 14926 } 14927 if v_0_0_0_0_0_0.AuxInt != 15 { 14928 break 14929 } 14930 y := v_0_0_0_0_0_0.Args[0] 14931 v_0_1 := v_0.Args[1] 14932 if v_0_1.Op != OpAMD64SHRW { 14933 break 14934 } 14935 _ = v_0_1.Args[1] 14936 x := v_0_1.Args[0] 14937 v_0_1_1 := v_0_1.Args[1] 14938 if v_0_1_1.Op != OpAMD64NEGQ { 14939 break 14940 } 14941 v_0_1_1_0 := v_0_1_1.Args[0] 14942 if v_0_1_1_0.Op != OpAMD64ADDQconst { 14943 break 14944 } 14945 if v_0_1_1_0.AuxInt != -16 { 14946 break 14947 } 14948 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 14949 if v_0_1_1_0_0.Op != OpAMD64ANDQconst { 14950 break 14951 } 14952 if v_0_1_1_0_0.AuxInt != 15 { 14953 break 14954 } 14955 if y != v_0_1_1_0_0.Args[0] { 14956 break 14957 } 14958 v_1 := v.Args[1] 14959 if v_1.Op != OpAMD64SHLL { 14960 break 14961 } 14962 _ = v_1.Args[1] 14963 if x != v_1.Args[0] { 14964 break 14965 } 14966 v_1_1 := v_1.Args[1] 14967 if v_1_1.Op != OpAMD64ANDQconst { 14968 break 14969 } 14970 if v_1_1.AuxInt != 15 { 14971 break 14972 } 14973 if y != v_1_1.Args[0] { 14974 break 14975 } 14976 if !(v.Type.Size() == 2) { 14977 break 14978 } 14979 v.reset(OpAMD64ROLW) 14980 v.AddArg(x) 14981 v.AddArg(y) 14982 return true 14983 } 14984 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])))) 14985 // cond: v.Type.Size() == 2 14986 // result: (ROLW x y) 14987 for { 14988 _ = v.Args[1] 14989 v_0 := v.Args[0] 14990 if v_0.Op != OpAMD64SHLL { 14991 break 14992 } 14993 _ = v_0.Args[1] 14994 x := v_0.Args[0] 14995 v_0_1 := v_0.Args[1] 14996 if v_0_1.Op != OpAMD64ANDLconst { 14997 break 14998 } 14999 if v_0_1.AuxInt != 15 { 15000 break 15001 } 15002 y := v_0_1.Args[0] 15003 v_1 := v.Args[1] 15004 if v_1.Op != OpAMD64ANDL { 15005 break 15006 } 15007 _ = v_1.Args[1] 15008 v_1_0 := v_1.Args[0] 15009 if v_1_0.Op != OpAMD64SHRW { 15010 break 15011 } 15012 _ = v_1_0.Args[1] 15013 if x != v_1_0.Args[0] { 15014 break 15015 } 15016 v_1_0_1 := v_1_0.Args[1] 15017 if v_1_0_1.Op != OpAMD64NEGL { 15018 break 15019 } 15020 v_1_0_1_0 := v_1_0_1.Args[0] 15021 if v_1_0_1_0.Op != OpAMD64ADDLconst { 15022 break 15023 } 15024 if v_1_0_1_0.AuxInt != -16 { 15025 break 15026 } 15027 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 15028 if v_1_0_1_0_0.Op != OpAMD64ANDLconst { 15029 break 15030 } 15031 if v_1_0_1_0_0.AuxInt != 15 { 15032 break 15033 } 15034 if y != v_1_0_1_0_0.Args[0] { 15035 break 15036 } 15037 v_1_1 := v_1.Args[1] 15038 if v_1_1.Op != OpAMD64SBBLcarrymask { 15039 break 15040 } 15041 v_1_1_0 := v_1_1.Args[0] 15042 if v_1_1_0.Op != OpAMD64CMPLconst { 15043 break 15044 } 15045 if v_1_1_0.AuxInt != 16 { 15046 break 15047 } 15048 v_1_1_0_0 := v_1_1_0.Args[0] 15049 if v_1_1_0_0.Op != OpAMD64NEGL { 15050 break 15051 } 15052 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 15053 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 15054 break 15055 } 15056 if v_1_1_0_0_0.AuxInt != -16 { 15057 break 15058 } 15059 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 15060 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 15061 break 15062 } 15063 if v_1_1_0_0_0_0.AuxInt != 15 { 15064 break 15065 } 
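// The 16-bit rotate patterns reuse the same skeleton with y&15 and an
// add of -16 in place of the 31/-32 pair, and the condition additionally
// requires v.Type.Size() == 2 so that ROLW is only produced where a
// 2-byte result is expected.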
15066 if y != v_1_1_0_0_0_0.Args[0] { 15067 break 15068 } 15069 if !(v.Type.Size() == 2) { 15070 break 15071 } 15072 v.reset(OpAMD64ROLW) 15073 v.AddArg(x) 15074 v.AddArg(y) 15075 return true 15076 } 15077 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))) 15078 // cond: v.Type.Size() == 2 15079 // result: (ROLW x y) 15080 for { 15081 _ = v.Args[1] 15082 v_0 := v.Args[0] 15083 if v_0.Op != OpAMD64SHLL { 15084 break 15085 } 15086 _ = v_0.Args[1] 15087 x := v_0.Args[0] 15088 v_0_1 := v_0.Args[1] 15089 if v_0_1.Op != OpAMD64ANDLconst { 15090 break 15091 } 15092 if v_0_1.AuxInt != 15 { 15093 break 15094 } 15095 y := v_0_1.Args[0] 15096 v_1 := v.Args[1] 15097 if v_1.Op != OpAMD64ANDL { 15098 break 15099 } 15100 _ = v_1.Args[1] 15101 v_1_0 := v_1.Args[0] 15102 if v_1_0.Op != OpAMD64SBBLcarrymask { 15103 break 15104 } 15105 v_1_0_0 := v_1_0.Args[0] 15106 if v_1_0_0.Op != OpAMD64CMPLconst { 15107 break 15108 } 15109 if v_1_0_0.AuxInt != 16 { 15110 break 15111 } 15112 v_1_0_0_0 := v_1_0_0.Args[0] 15113 if v_1_0_0_0.Op != OpAMD64NEGL { 15114 break 15115 } 15116 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 15117 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 15118 break 15119 } 15120 if v_1_0_0_0_0.AuxInt != -16 { 15121 break 15122 } 15123 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 15124 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 15125 break 15126 } 15127 if v_1_0_0_0_0_0.AuxInt != 15 { 15128 break 15129 } 15130 if y != v_1_0_0_0_0_0.Args[0] { 15131 break 15132 } 15133 v_1_1 := v_1.Args[1] 15134 if v_1_1.Op != OpAMD64SHRW { 15135 break 15136 } 15137 _ = v_1_1.Args[1] 15138 if x != v_1_1.Args[0] { 15139 break 15140 } 15141 v_1_1_1 := v_1_1.Args[1] 15142 if v_1_1_1.Op != OpAMD64NEGL { 15143 break 15144 } 15145 v_1_1_1_0 := v_1_1_1.Args[0] 15146 if v_1_1_1_0.Op != OpAMD64ADDLconst { 15147 break 15148 } 15149 if v_1_1_1_0.AuxInt != -16 { 15150 break 15151 } 15152 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 15153 if v_1_1_1_0_0.Op != OpAMD64ANDLconst { 15154 break 15155 } 15156 if v_1_1_1_0_0.AuxInt != 15 { 15157 break 15158 } 15159 if y != v_1_1_1_0_0.Args[0] { 15160 break 15161 } 15162 if !(v.Type.Size() == 2) { 15163 break 15164 } 15165 v.reset(OpAMD64ROLW) 15166 v.AddArg(x) 15167 v.AddArg(y) 15168 return true 15169 } 15170 return false 15171 } 15172 func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { 15173 // match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15]))) 15174 // cond: v.Type.Size() == 2 15175 // result: (ROLW x y) 15176 for { 15177 _ = v.Args[1] 15178 v_0 := v.Args[0] 15179 if v_0.Op != OpAMD64ANDL { 15180 break 15181 } 15182 _ = v_0.Args[1] 15183 v_0_0 := v_0.Args[0] 15184 if v_0_0.Op != OpAMD64SHRW { 15185 break 15186 } 15187 _ = v_0_0.Args[1] 15188 x := v_0_0.Args[0] 15189 v_0_0_1 := v_0_0.Args[1] 15190 if v_0_0_1.Op != OpAMD64NEGL { 15191 break 15192 } 15193 v_0_0_1_0 := v_0_0_1.Args[0] 15194 if v_0_0_1_0.Op != OpAMD64ADDLconst { 15195 break 15196 } 15197 if v_0_0_1_0.AuxInt != -16 { 15198 break 15199 } 15200 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 15201 if v_0_0_1_0_0.Op != OpAMD64ANDLconst { 15202 break 15203 } 15204 if v_0_0_1_0_0.AuxInt != 15 { 15205 break 15206 } 15207 y := v_0_0_1_0_0.Args[0] 15208 v_0_1 := v_0.Args[1] 15209 if v_0_1.Op != OpAMD64SBBLcarrymask { 15210 break 15211 } 15212 v_0_1_0 := v_0_1.Args[0] 15213 if v_0_1_0.Op != OpAMD64CMPLconst { 15214 break 15215 } 
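// rewriteValueAMD64_OpAMD64ORL_30 continues the rotate recognition above.
// Informal reading (a hand-written sketch, not generated rule text): the
// SHLL/SHRW/SBBLcarrymask trees matched here are what shift lowering produces
// for a Go-level 16-bit rotate written with bounded shifts, e.g. something
// like
//
//	func rotl16(x uint16, k uint) uint16 {
//		return x<<(k&15) | x>>(16-k&15)
//	}
//
// The right-shift count 16-k&15 can reach 16, so its lowering carries a
// SBBLcarrymask(CMPconst ... [16]) mask that zeroes that term in the k&15 == 0
// case; once the whole shape lines up, it collapses to a single ROLW.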
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
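	// The four patterns below are the mirror image: a masked right shift
	// OR'd with the complementary left shift, i.e. (informally)
	// x>>(k&15) | x<<(16-k&15), which collapses to RORW. No carry mask is
	// needed on the SHLL side, since only the low 16 bits of the result
	// are observed (v.Type.Size() == 2).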
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
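	// The same recognition at byte width: the rotate count is masked with
	// 7 and the complementary count is 8-k&7, so these trees carry [ 7],
	// [ -8], and [ 8] where the 16-bit forms carried [15], [-16], and
	// [16], and a full match becomes ROLB.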
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
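// rewriteValueAMD64_OpAMD64ORL_40 handles the remaining byte-rotate shapes:
// the 32-bit (NEGL/ADDLconst/ANDLconst) forms of ROLB, then the RORB
// patterns, before moving on to load combining.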
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
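	// RORB: byte rotate right, analogous to the RORW patterns above.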
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
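	// Past the rotates, ORL also reassembles wider loads from adjacent
	// narrow ones: after the trivial self-OR, a byte loaded at [i0] OR'd
	// with the byte at [i0+1] shifted left by 8 is rebuilt as the 16-bit
	// little-endian load at [i0]. Sketch of the identity being used
	// (hand-written, not generated):
	//
	//	w := uint16(p[0]) | uint16(p[1])<<8 // same bytes as one 16-bit load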
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
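// In the conditions above, the Uses == 1 checks and clobber calls make sure
// the narrow loads and the shift die once they are merged, and mergePoint
// chooses a block where both original loads are available, so the wide load
// can be placed there (see the helpers in rewrite.go).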
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
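	// The next four patterns merge a pair of shifted byte loads (s0, s1)
	// sitting inside a larger ORL chain: when the bytes are adjacent and
	// j1 == j0+8 with j0 aligned to 16, the pair becomes one MOVWload
	// shifted by j0, OR'd back with the rest of the chain (y). The four
	// bodies differ only in operand order, ORL being commutative.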
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
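	// The byte-combining rules repeat below for the indexed loads
	// (MOVBloadidx1). Since p+idx is symmetric, the generator spells out
	// every p/idx permutation of each shape rather than canonicalizing.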
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
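// rewriteValueAMD64_OpAMD64ORL_60 finishes the MOVBloadidx1 permutations and
// then merges adjacent 16-bit indexed loads (MOVWloadidx1 plus
// SHLLconst [16]) into MOVLloadidx1.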
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst
{ 17649 break 17650 } 17651 if sh.AuxInt != 16 { 17652 break 17653 } 17654 x1 := sh.Args[0] 17655 if x1.Op != OpAMD64MOVWloadidx1 { 17656 break 17657 } 17658 i1 := x1.AuxInt 17659 if x1.Aux != s { 17660 break 17661 } 17662 _ = x1.Args[2] 17663 if idx != x1.Args[0] { 17664 break 17665 } 17666 if p != x1.Args[1] { 17667 break 17668 } 17669 if mem != x1.Args[2] { 17670 break 17671 } 17672 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17673 break 17674 } 17675 b = mergePoint(b, x0, x1) 17676 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 17677 v.reset(OpCopy) 17678 v.AddArg(v0) 17679 v0.AuxInt = i0 17680 v0.Aux = s 17681 v0.AddArg(p) 17682 v0.AddArg(idx) 17683 v0.AddArg(mem) 17684 return true 17685 } 17686 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 17687 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17688 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 17689 for { 17690 _ = v.Args[1] 17691 sh := v.Args[0] 17692 if sh.Op != OpAMD64SHLLconst { 17693 break 17694 } 17695 if sh.AuxInt != 16 { 17696 break 17697 } 17698 x1 := sh.Args[0] 17699 if x1.Op != OpAMD64MOVWloadidx1 { 17700 break 17701 } 17702 i1 := x1.AuxInt 17703 s := x1.Aux 17704 _ = x1.Args[2] 17705 p := x1.Args[0] 17706 idx := x1.Args[1] 17707 mem := x1.Args[2] 17708 x0 := v.Args[1] 17709 if x0.Op != OpAMD64MOVWloadidx1 { 17710 break 17711 } 17712 i0 := x0.AuxInt 17713 if x0.Aux != s { 17714 break 17715 } 17716 _ = x0.Args[2] 17717 if p != x0.Args[0] { 17718 break 17719 } 17720 if idx != x0.Args[1] { 17721 break 17722 } 17723 if mem != x0.Args[2] { 17724 break 17725 } 17726 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17727 break 17728 } 17729 b = mergePoint(b, x0, x1) 17730 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 17731 v.reset(OpCopy) 17732 v.AddArg(v0) 17733 v0.AuxInt = i0 17734 v0.Aux = s 17735 v0.AddArg(p) 17736 v0.AddArg(idx) 17737 v0.AddArg(mem) 17738 return true 17739 } 17740 return false 17741 } 17742 func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool { 17743 b := v.Block 17744 _ = b 17745 typ := &b.Func.Config.Types 17746 _ = typ 17747 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 17748 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17749 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 17750 for { 17751 _ = v.Args[1] 17752 sh := v.Args[0] 17753 if sh.Op != OpAMD64SHLLconst { 17754 break 17755 } 17756 if sh.AuxInt != 16 { 17757 break 17758 } 17759 x1 := sh.Args[0] 17760 if x1.Op != OpAMD64MOVWloadidx1 { 17761 break 17762 } 17763 i1 := x1.AuxInt 17764 s := x1.Aux 17765 _ = x1.Args[2] 17766 idx := x1.Args[0] 17767 p := x1.Args[1] 17768 mem := x1.Args[2] 17769 x0 := v.Args[1] 17770 if x0.Op != OpAMD64MOVWloadidx1 { 17771 break 17772 } 17773 i0 := x0.AuxInt 17774 if x0.Aux != s { 17775 break 17776 } 17777 _ = x0.Args[2] 17778 if p != x0.Args[0] { 17779 break 17780 } 17781 if idx != x0.Args[1] { 17782 break 17783 } 17784 if mem != x0.Args[2] { 17785 break 17786 } 17787 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != 
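// The _70 chunk below continues the ORL load-combining rules: the
// remaining p/idx argument orders for fusing two adjacent MOVWloadidx1
// loads into one MOVLloadidx1, then the orders for fusing two shifted
// MOVBloadidx1 loads into a shifted MOVWloadidx1 inside a larger OR tree.
// In source terms this corresponds (roughly) to a little-endian read such
// as uint32(lo) | uint32(hi)<<16 over adjacent memory collapsing into a
// single wider load.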
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
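// The _80 chunk finishes the shifted-byte-load commutations (the inner
// ORL appearing as the left operand of the outer ORL) and then begins the
// byte-swapped forms: two MOVBload halves read in big-endian order become
// one 16-bit load plus a byte swap, ROLWconst [8] (MOVWload). Roughly,
// uint16(b[1]) | uint16(b[0])<<8 collapses into a single rotated load.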
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
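// The _90 chunk handles the remaining byte-swapped loads: MOVBload pairs
// read in big-endian order become ROLWconst(MOVWload), two byte-swapped
// 16-bit loads (ROLWconst of MOVWload) merge into BSWAPL(MOVLload), and
// the same patterns are then repeated for the indexed MOVBloadidx1 forms.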
func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
&b.Func.Config.Types 19706 _ = typ 19707 // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 19708 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19709 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19710 for { 19711 _ = v.Args[1] 19712 x1 := v.Args[0] 19713 if x1.Op != OpAMD64MOVBloadidx1 { 19714 break 19715 } 19716 i1 := x1.AuxInt 19717 s := x1.Aux 19718 _ = x1.Args[2] 19719 idx := x1.Args[0] 19720 p := x1.Args[1] 19721 mem := x1.Args[2] 19722 sh := v.Args[1] 19723 if sh.Op != OpAMD64SHLLconst { 19724 break 19725 } 19726 if sh.AuxInt != 8 { 19727 break 19728 } 19729 x0 := sh.Args[0] 19730 if x0.Op != OpAMD64MOVBloadidx1 { 19731 break 19732 } 19733 i0 := x0.AuxInt 19734 if x0.Aux != s { 19735 break 19736 } 19737 _ = x0.Args[2] 19738 if idx != x0.Args[0] { 19739 break 19740 } 19741 if p != x0.Args[1] { 19742 break 19743 } 19744 if mem != x0.Args[2] { 19745 break 19746 } 19747 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19748 break 19749 } 19750 b = mergePoint(b, x0, x1) 19751 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19752 v.reset(OpCopy) 19753 v.AddArg(v0) 19754 v0.AuxInt = 8 19755 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19756 v1.AuxInt = i0 19757 v1.Aux = s 19758 v1.AddArg(p) 19759 v1.AddArg(idx) 19760 v1.AddArg(mem) 19761 v0.AddArg(v1) 19762 return true 19763 } 19764 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 19765 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19766 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19767 for { 19768 _ = v.Args[1] 19769 sh := v.Args[0] 19770 if sh.Op != OpAMD64SHLLconst { 19771 break 19772 } 19773 if sh.AuxInt != 8 { 19774 break 19775 } 19776 x0 := sh.Args[0] 19777 if x0.Op != OpAMD64MOVBloadidx1 { 19778 break 19779 } 19780 i0 := x0.AuxInt 19781 s := x0.Aux 19782 _ = x0.Args[2] 19783 p := x0.Args[0] 19784 idx := x0.Args[1] 19785 mem := x0.Args[2] 19786 x1 := v.Args[1] 19787 if x1.Op != OpAMD64MOVBloadidx1 { 19788 break 19789 } 19790 i1 := x1.AuxInt 19791 if x1.Aux != s { 19792 break 19793 } 19794 _ = x1.Args[2] 19795 if p != x1.Args[0] { 19796 break 19797 } 19798 if idx != x1.Args[1] { 19799 break 19800 } 19801 if mem != x1.Args[2] { 19802 break 19803 } 19804 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19805 break 19806 } 19807 b = mergePoint(b, x0, x1) 19808 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19809 v.reset(OpCopy) 19810 v.AddArg(v0) 19811 v0.AuxInt = 8 19812 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19813 v1.AuxInt = i0 19814 v1.Aux = s 19815 v1.AddArg(p) 19816 v1.AddArg(idx) 19817 v1.AddArg(mem) 19818 v0.AddArg(v1) 19819 return true 19820 } 19821 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 19822 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19823 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19824 for 
{ 19825 _ = v.Args[1] 19826 sh := v.Args[0] 19827 if sh.Op != OpAMD64SHLLconst { 19828 break 19829 } 19830 if sh.AuxInt != 8 { 19831 break 19832 } 19833 x0 := sh.Args[0] 19834 if x0.Op != OpAMD64MOVBloadidx1 { 19835 break 19836 } 19837 i0 := x0.AuxInt 19838 s := x0.Aux 19839 _ = x0.Args[2] 19840 idx := x0.Args[0] 19841 p := x0.Args[1] 19842 mem := x0.Args[2] 19843 x1 := v.Args[1] 19844 if x1.Op != OpAMD64MOVBloadidx1 { 19845 break 19846 } 19847 i1 := x1.AuxInt 19848 if x1.Aux != s { 19849 break 19850 } 19851 _ = x1.Args[2] 19852 if p != x1.Args[0] { 19853 break 19854 } 19855 if idx != x1.Args[1] { 19856 break 19857 } 19858 if mem != x1.Args[2] { 19859 break 19860 } 19861 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19862 break 19863 } 19864 b = mergePoint(b, x0, x1) 19865 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19866 v.reset(OpCopy) 19867 v.AddArg(v0) 19868 v0.AuxInt = 8 19869 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19870 v1.AuxInt = i0 19871 v1.Aux = s 19872 v1.AddArg(p) 19873 v1.AddArg(idx) 19874 v1.AddArg(mem) 19875 v0.AddArg(v1) 19876 return true 19877 } 19878 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 19879 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19880 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19881 for { 19882 _ = v.Args[1] 19883 sh := v.Args[0] 19884 if sh.Op != OpAMD64SHLLconst { 19885 break 19886 } 19887 if sh.AuxInt != 8 { 19888 break 19889 } 19890 x0 := sh.Args[0] 19891 if x0.Op != OpAMD64MOVBloadidx1 { 19892 break 19893 } 19894 i0 := x0.AuxInt 19895 s := x0.Aux 19896 _ = x0.Args[2] 19897 p := x0.Args[0] 19898 idx := x0.Args[1] 19899 mem := x0.Args[2] 19900 x1 := v.Args[1] 19901 if x1.Op != OpAMD64MOVBloadidx1 { 19902 break 19903 } 19904 i1 := x1.AuxInt 19905 if x1.Aux != s { 19906 break 19907 } 19908 _ = x1.Args[2] 19909 if idx != x1.Args[0] { 19910 break 19911 } 19912 if p != x1.Args[1] { 19913 break 19914 } 19915 if mem != x1.Args[2] { 19916 break 19917 } 19918 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19919 break 19920 } 19921 b = mergePoint(b, x0, x1) 19922 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19923 v.reset(OpCopy) 19924 v.AddArg(v0) 19925 v0.AuxInt = 8 19926 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19927 v1.AuxInt = i0 19928 v1.Aux = s 19929 v1.AddArg(p) 19930 v1.AddArg(idx) 19931 v1.AddArg(mem) 19932 v0.AddArg(v1) 19933 return true 19934 } 19935 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 19936 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19937 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19938 for { 19939 _ = v.Args[1] 19940 sh := v.Args[0] 19941 if sh.Op != OpAMD64SHLLconst { 19942 break 19943 } 19944 if sh.AuxInt != 8 { 19945 break 19946 } 19947 x0 := sh.Args[0] 19948 if x0.Op != OpAMD64MOVBloadidx1 { 19949 break 19950 } 19951 i0 := x0.AuxInt 19952 s := x0.Aux 19953 _ = x0.Args[2] 19954 idx := x0.Args[0] 19955 p := x0.Args[1] 19956 mem := x0.Args[2] 19957 x1 := v.Args[1] 19958 if x1.Op != 
OpAMD64MOVBloadidx1 { 19959 break 19960 } 19961 i1 := x1.AuxInt 19962 if x1.Aux != s { 19963 break 19964 } 19965 _ = x1.Args[2] 19966 if idx != x1.Args[0] { 19967 break 19968 } 19969 if p != x1.Args[1] { 19970 break 19971 } 19972 if mem != x1.Args[2] { 19973 break 19974 } 19975 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19976 break 19977 } 19978 b = mergePoint(b, x0, x1) 19979 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19980 v.reset(OpCopy) 19981 v.AddArg(v0) 19982 v0.AuxInt = 8 19983 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19984 v1.AuxInt = i0 19985 v1.Aux = s 19986 v1.AddArg(p) 19987 v1.AddArg(idx) 19988 v1.AddArg(mem) 19989 v0.AddArg(v1) 19990 return true 19991 } 19992 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 19993 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 19994 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 19995 for { 19996 _ = v.Args[1] 19997 r1 := v.Args[0] 19998 if r1.Op != OpAMD64ROLWconst { 19999 break 20000 } 20001 if r1.AuxInt != 8 { 20002 break 20003 } 20004 x1 := r1.Args[0] 20005 if x1.Op != OpAMD64MOVWloadidx1 { 20006 break 20007 } 20008 i1 := x1.AuxInt 20009 s := x1.Aux 20010 _ = x1.Args[2] 20011 p := x1.Args[0] 20012 idx := x1.Args[1] 20013 mem := x1.Args[2] 20014 sh := v.Args[1] 20015 if sh.Op != OpAMD64SHLLconst { 20016 break 20017 } 20018 if sh.AuxInt != 16 { 20019 break 20020 } 20021 r0 := sh.Args[0] 20022 if r0.Op != OpAMD64ROLWconst { 20023 break 20024 } 20025 if r0.AuxInt != 8 { 20026 break 20027 } 20028 x0 := r0.Args[0] 20029 if x0.Op != OpAMD64MOVWloadidx1 { 20030 break 20031 } 20032 i0 := x0.AuxInt 20033 if x0.Aux != s { 20034 break 20035 } 20036 _ = x0.Args[2] 20037 if p != x0.Args[0] { 20038 break 20039 } 20040 if idx != x0.Args[1] { 20041 break 20042 } 20043 if mem != x0.Args[2] { 20044 break 20045 } 20046 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20047 break 20048 } 20049 b = mergePoint(b, x0, x1) 20050 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20051 v.reset(OpCopy) 20052 v.AddArg(v0) 20053 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 20054 v1.AuxInt = i0 20055 v1.Aux = s 20056 v1.AddArg(p) 20057 v1.AddArg(idx) 20058 v1.AddArg(mem) 20059 v0.AddArg(v1) 20060 return true 20061 } 20062 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 20063 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20064 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 20065 for { 20066 _ = v.Args[1] 20067 r1 := v.Args[0] 20068 if r1.Op != OpAMD64ROLWconst { 20069 break 20070 } 20071 if r1.AuxInt != 8 { 20072 break 20073 } 20074 x1 := r1.Args[0] 20075 if x1.Op != OpAMD64MOVWloadidx1 { 20076 break 20077 } 20078 i1 := x1.AuxInt 20079 s := x1.Aux 20080 _ = x1.Args[2] 20081 idx := x1.Args[0] 20082 p := 
x1.Args[1] 20083 mem := x1.Args[2] 20084 sh := v.Args[1] 20085 if sh.Op != OpAMD64SHLLconst { 20086 break 20087 } 20088 if sh.AuxInt != 16 { 20089 break 20090 } 20091 r0 := sh.Args[0] 20092 if r0.Op != OpAMD64ROLWconst { 20093 break 20094 } 20095 if r0.AuxInt != 8 { 20096 break 20097 } 20098 x0 := r0.Args[0] 20099 if x0.Op != OpAMD64MOVWloadidx1 { 20100 break 20101 } 20102 i0 := x0.AuxInt 20103 if x0.Aux != s { 20104 break 20105 } 20106 _ = x0.Args[2] 20107 if p != x0.Args[0] { 20108 break 20109 } 20110 if idx != x0.Args[1] { 20111 break 20112 } 20113 if mem != x0.Args[2] { 20114 break 20115 } 20116 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20117 break 20118 } 20119 b = mergePoint(b, x0, x1) 20120 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20121 v.reset(OpCopy) 20122 v.AddArg(v0) 20123 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 20124 v1.AuxInt = i0 20125 v1.Aux = s 20126 v1.AddArg(p) 20127 v1.AddArg(idx) 20128 v1.AddArg(mem) 20129 v0.AddArg(v1) 20130 return true 20131 } 20132 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 20133 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20134 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 20135 for { 20136 _ = v.Args[1] 20137 r1 := v.Args[0] 20138 if r1.Op != OpAMD64ROLWconst { 20139 break 20140 } 20141 if r1.AuxInt != 8 { 20142 break 20143 } 20144 x1 := r1.Args[0] 20145 if x1.Op != OpAMD64MOVWloadidx1 { 20146 break 20147 } 20148 i1 := x1.AuxInt 20149 s := x1.Aux 20150 _ = x1.Args[2] 20151 p := x1.Args[0] 20152 idx := x1.Args[1] 20153 mem := x1.Args[2] 20154 sh := v.Args[1] 20155 if sh.Op != OpAMD64SHLLconst { 20156 break 20157 } 20158 if sh.AuxInt != 16 { 20159 break 20160 } 20161 r0 := sh.Args[0] 20162 if r0.Op != OpAMD64ROLWconst { 20163 break 20164 } 20165 if r0.AuxInt != 8 { 20166 break 20167 } 20168 x0 := r0.Args[0] 20169 if x0.Op != OpAMD64MOVWloadidx1 { 20170 break 20171 } 20172 i0 := x0.AuxInt 20173 if x0.Aux != s { 20174 break 20175 } 20176 _ = x0.Args[2] 20177 if idx != x0.Args[0] { 20178 break 20179 } 20180 if p != x0.Args[1] { 20181 break 20182 } 20183 if mem != x0.Args[2] { 20184 break 20185 } 20186 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20187 break 20188 } 20189 b = mergePoint(b, x0, x1) 20190 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20191 v.reset(OpCopy) 20192 v.AddArg(v0) 20193 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 20194 v1.AuxInt = i0 20195 v1.Aux = s 20196 v1.AddArg(p) 20197 v1.AddArg(idx) 20198 v1.AddArg(mem) 20199 v0.AddArg(v1) 20200 return true 20201 } 20202 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 20203 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20204 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> 
(MOVLloadidx1 [i0] {s} p idx mem)) 20205 for { 20206 _ = v.Args[1] 20207 r1 := v.Args[0] 20208 if r1.Op != OpAMD64ROLWconst { 20209 break 20210 } 20211 if r1.AuxInt != 8 { 20212 break 20213 } 20214 x1 := r1.Args[0] 20215 if x1.Op != OpAMD64MOVWloadidx1 { 20216 break 20217 } 20218 i1 := x1.AuxInt 20219 s := x1.Aux 20220 _ = x1.Args[2] 20221 idx := x1.Args[0] 20222 p := x1.Args[1] 20223 mem := x1.Args[2] 20224 sh := v.Args[1] 20225 if sh.Op != OpAMD64SHLLconst { 20226 break 20227 } 20228 if sh.AuxInt != 16 { 20229 break 20230 } 20231 r0 := sh.Args[0] 20232 if r0.Op != OpAMD64ROLWconst { 20233 break 20234 } 20235 if r0.AuxInt != 8 { 20236 break 20237 } 20238 x0 := r0.Args[0] 20239 if x0.Op != OpAMD64MOVWloadidx1 { 20240 break 20241 } 20242 i0 := x0.AuxInt 20243 if x0.Aux != s { 20244 break 20245 } 20246 _ = x0.Args[2] 20247 if idx != x0.Args[0] { 20248 break 20249 } 20250 if p != x0.Args[1] { 20251 break 20252 } 20253 if mem != x0.Args[2] { 20254 break 20255 } 20256 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20257 break 20258 } 20259 b = mergePoint(b, x0, x1) 20260 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20261 v.reset(OpCopy) 20262 v.AddArg(v0) 20263 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 20264 v1.AuxInt = i0 20265 v1.Aux = s 20266 v1.AddArg(p) 20267 v1.AddArg(idx) 20268 v1.AddArg(mem) 20269 v0.AddArg(v1) 20270 return true 20271 } 20272 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 20273 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20274 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 20275 for { 20276 _ = v.Args[1] 20277 sh := v.Args[0] 20278 if sh.Op != OpAMD64SHLLconst { 20279 break 20280 } 20281 if sh.AuxInt != 16 { 20282 break 20283 } 20284 r0 := sh.Args[0] 20285 if r0.Op != OpAMD64ROLWconst { 20286 break 20287 } 20288 if r0.AuxInt != 8 { 20289 break 20290 } 20291 x0 := r0.Args[0] 20292 if x0.Op != OpAMD64MOVWloadidx1 { 20293 break 20294 } 20295 i0 := x0.AuxInt 20296 s := x0.Aux 20297 _ = x0.Args[2] 20298 p := x0.Args[0] 20299 idx := x0.Args[1] 20300 mem := x0.Args[2] 20301 r1 := v.Args[1] 20302 if r1.Op != OpAMD64ROLWconst { 20303 break 20304 } 20305 if r1.AuxInt != 8 { 20306 break 20307 } 20308 x1 := r1.Args[0] 20309 if x1.Op != OpAMD64MOVWloadidx1 { 20310 break 20311 } 20312 i1 := x1.AuxInt 20313 if x1.Aux != s { 20314 break 20315 } 20316 _ = x1.Args[2] 20317 if p != x1.Args[0] { 20318 break 20319 } 20320 if idx != x1.Args[1] { 20321 break 20322 } 20323 if mem != x1.Args[2] { 20324 break 20325 } 20326 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20327 break 20328 } 20329 b = mergePoint(b, x0, x1) 20330 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20331 v.reset(OpCopy) 20332 v.AddArg(v0) 20333 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 20334 v1.AuxInt = i0 20335 v1.Aux = s 20336 v1.AddArg(p) 20337 v1.AddArg(idx) 20338 v1.AddArg(mem) 20339 v0.AddArg(v1) 20340 return true 20341 } 20342 return false 20343 } 20344 func 
rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool { 20345 b := v.Block 20346 _ = b 20347 typ := &b.Func.Config.Types 20348 _ = typ 20349 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 20350 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20351 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 20352 for { 20353 _ = v.Args[1] 20354 sh := v.Args[0] 20355 if sh.Op != OpAMD64SHLLconst { 20356 break 20357 } 20358 if sh.AuxInt != 16 { 20359 break 20360 } 20361 r0 := sh.Args[0] 20362 if r0.Op != OpAMD64ROLWconst { 20363 break 20364 } 20365 if r0.AuxInt != 8 { 20366 break 20367 } 20368 x0 := r0.Args[0] 20369 if x0.Op != OpAMD64MOVWloadidx1 { 20370 break 20371 } 20372 i0 := x0.AuxInt 20373 s := x0.Aux 20374 _ = x0.Args[2] 20375 idx := x0.Args[0] 20376 p := x0.Args[1] 20377 mem := x0.Args[2] 20378 r1 := v.Args[1] 20379 if r1.Op != OpAMD64ROLWconst { 20380 break 20381 } 20382 if r1.AuxInt != 8 { 20383 break 20384 } 20385 x1 := r1.Args[0] 20386 if x1.Op != OpAMD64MOVWloadidx1 { 20387 break 20388 } 20389 i1 := x1.AuxInt 20390 if x1.Aux != s { 20391 break 20392 } 20393 _ = x1.Args[2] 20394 if p != x1.Args[0] { 20395 break 20396 } 20397 if idx != x1.Args[1] { 20398 break 20399 } 20400 if mem != x1.Args[2] { 20401 break 20402 } 20403 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20404 break 20405 } 20406 b = mergePoint(b, x0, x1) 20407 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20408 v.reset(OpCopy) 20409 v.AddArg(v0) 20410 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 20411 v1.AuxInt = i0 20412 v1.Aux = s 20413 v1.AddArg(p) 20414 v1.AddArg(idx) 20415 v1.AddArg(mem) 20416 v0.AddArg(v1) 20417 return true 20418 } 20419 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 20420 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20421 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 20422 for { 20423 _ = v.Args[1] 20424 sh := v.Args[0] 20425 if sh.Op != OpAMD64SHLLconst { 20426 break 20427 } 20428 if sh.AuxInt != 16 { 20429 break 20430 } 20431 r0 := sh.Args[0] 20432 if r0.Op != OpAMD64ROLWconst { 20433 break 20434 } 20435 if r0.AuxInt != 8 { 20436 break 20437 } 20438 x0 := r0.Args[0] 20439 if x0.Op != OpAMD64MOVWloadidx1 { 20440 break 20441 } 20442 i0 := x0.AuxInt 20443 s := x0.Aux 20444 _ = x0.Args[2] 20445 p := x0.Args[0] 20446 idx := x0.Args[1] 20447 mem := x0.Args[2] 20448 r1 := v.Args[1] 20449 if r1.Op != OpAMD64ROLWconst { 20450 break 20451 } 20452 if r1.AuxInt != 8 { 20453 break 20454 } 20455 x1 := r1.Args[0] 20456 if x1.Op != OpAMD64MOVWloadidx1 { 20457 break 20458 } 20459 i1 := x1.AuxInt 20460 if x1.Aux != s { 20461 break 20462 } 20463 _ = x1.Args[2] 20464 if idx != x1.Args[0] { 20465 break 20466 } 20467 if p != x1.Args[1] { 20468 break 20469 } 20470 if mem != x1.Args[2] { 20471 break 20472 } 20473 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && 
r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20474 break 20475 } 20476 b = mergePoint(b, x0, x1) 20477 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20478 v.reset(OpCopy) 20479 v.AddArg(v0) 20480 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 20481 v1.AuxInt = i0 20482 v1.Aux = s 20483 v1.AddArg(p) 20484 v1.AddArg(idx) 20485 v1.AddArg(mem) 20486 v0.AddArg(v1) 20487 return true 20488 } 20489 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 20490 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 20491 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 20492 for { 20493 _ = v.Args[1] 20494 sh := v.Args[0] 20495 if sh.Op != OpAMD64SHLLconst { 20496 break 20497 } 20498 if sh.AuxInt != 16 { 20499 break 20500 } 20501 r0 := sh.Args[0] 20502 if r0.Op != OpAMD64ROLWconst { 20503 break 20504 } 20505 if r0.AuxInt != 8 { 20506 break 20507 } 20508 x0 := r0.Args[0] 20509 if x0.Op != OpAMD64MOVWloadidx1 { 20510 break 20511 } 20512 i0 := x0.AuxInt 20513 s := x0.Aux 20514 _ = x0.Args[2] 20515 idx := x0.Args[0] 20516 p := x0.Args[1] 20517 mem := x0.Args[2] 20518 r1 := v.Args[1] 20519 if r1.Op != OpAMD64ROLWconst { 20520 break 20521 } 20522 if r1.AuxInt != 8 { 20523 break 20524 } 20525 x1 := r1.Args[0] 20526 if x1.Op != OpAMD64MOVWloadidx1 { 20527 break 20528 } 20529 i1 := x1.AuxInt 20530 if x1.Aux != s { 20531 break 20532 } 20533 _ = x1.Args[2] 20534 if idx != x1.Args[0] { 20535 break 20536 } 20537 if p != x1.Args[1] { 20538 break 20539 } 20540 if mem != x1.Args[2] { 20541 break 20542 } 20543 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 20544 break 20545 } 20546 b = mergePoint(b, x0, x1) 20547 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 20548 v.reset(OpCopy) 20549 v.AddArg(v0) 20550 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 20551 v1.AuxInt = i0 20552 v1.Aux = s 20553 v1.AddArg(p) 20554 v1.AddArg(idx) 20555 v1.AddArg(mem) 20556 v0.AddArg(v1) 20557 return true 20558 } 20559 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 20560 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20561 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 20562 for { 20563 _ = v.Args[1] 20564 s0 := v.Args[0] 20565 if s0.Op != OpAMD64SHLLconst { 20566 break 20567 } 20568 j0 := s0.AuxInt 20569 x0 := s0.Args[0] 20570 if x0.Op != OpAMD64MOVBloadidx1 { 20571 break 20572 } 20573 i0 := x0.AuxInt 20574 s := x0.Aux 20575 _ = x0.Args[2] 20576 p := x0.Args[0] 20577 idx := x0.Args[1] 20578 mem := x0.Args[2] 20579 or := v.Args[1] 20580 if or.Op != OpAMD64ORL { 20581 break 20582 } 20583 _ = or.Args[1] 20584 s1 := or.Args[0] 20585 if s1.Op != OpAMD64SHLLconst { 20586 break 20587 } 20588 j1 := s1.AuxInt 20589 x1 := s1.Args[0] 
20590 if x1.Op != OpAMD64MOVBloadidx1 { 20591 break 20592 } 20593 i1 := x1.AuxInt 20594 if x1.Aux != s { 20595 break 20596 } 20597 _ = x1.Args[2] 20598 if p != x1.Args[0] { 20599 break 20600 } 20601 if idx != x1.Args[1] { 20602 break 20603 } 20604 if mem != x1.Args[2] { 20605 break 20606 } 20607 y := or.Args[1] 20608 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20609 break 20610 } 20611 b = mergePoint(b, x0, x1) 20612 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20613 v.reset(OpCopy) 20614 v.AddArg(v0) 20615 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20616 v1.AuxInt = j1 20617 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20618 v2.AuxInt = 8 20619 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20620 v3.AuxInt = i0 20621 v3.Aux = s 20622 v3.AddArg(p) 20623 v3.AddArg(idx) 20624 v3.AddArg(mem) 20625 v2.AddArg(v3) 20626 v1.AddArg(v2) 20627 v0.AddArg(v1) 20628 v0.AddArg(y) 20629 return true 20630 } 20631 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 20632 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20633 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 20634 for { 20635 _ = v.Args[1] 20636 s0 := v.Args[0] 20637 if s0.Op != OpAMD64SHLLconst { 20638 break 20639 } 20640 j0 := s0.AuxInt 20641 x0 := s0.Args[0] 20642 if x0.Op != OpAMD64MOVBloadidx1 { 20643 break 20644 } 20645 i0 := x0.AuxInt 20646 s := x0.Aux 20647 _ = x0.Args[2] 20648 idx := x0.Args[0] 20649 p := x0.Args[1] 20650 mem := x0.Args[2] 20651 or := v.Args[1] 20652 if or.Op != OpAMD64ORL { 20653 break 20654 } 20655 _ = or.Args[1] 20656 s1 := or.Args[0] 20657 if s1.Op != OpAMD64SHLLconst { 20658 break 20659 } 20660 j1 := s1.AuxInt 20661 x1 := s1.Args[0] 20662 if x1.Op != OpAMD64MOVBloadidx1 { 20663 break 20664 } 20665 i1 := x1.AuxInt 20666 if x1.Aux != s { 20667 break 20668 } 20669 _ = x1.Args[2] 20670 if p != x1.Args[0] { 20671 break 20672 } 20673 if idx != x1.Args[1] { 20674 break 20675 } 20676 if mem != x1.Args[2] { 20677 break 20678 } 20679 y := or.Args[1] 20680 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20681 break 20682 } 20683 b = mergePoint(b, x0, x1) 20684 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20685 v.reset(OpCopy) 20686 v.AddArg(v0) 20687 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20688 v1.AuxInt = j1 20689 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20690 v2.AuxInt = 8 20691 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20692 v3.AuxInt = i0 20693 v3.Aux = s 20694 v3.AddArg(p) 20695 v3.AddArg(idx) 20696 v3.AddArg(mem) 20697 v2.AddArg(v3) 20698 v1.AddArg(v2) 20699 v0.AddArg(v1) 20700 v0.AddArg(y) 20701 return true 20702 } 20703 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 20704 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && 
x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20705 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 20706 for { 20707 _ = v.Args[1] 20708 s0 := v.Args[0] 20709 if s0.Op != OpAMD64SHLLconst { 20710 break 20711 } 20712 j0 := s0.AuxInt 20713 x0 := s0.Args[0] 20714 if x0.Op != OpAMD64MOVBloadidx1 { 20715 break 20716 } 20717 i0 := x0.AuxInt 20718 s := x0.Aux 20719 _ = x0.Args[2] 20720 p := x0.Args[0] 20721 idx := x0.Args[1] 20722 mem := x0.Args[2] 20723 or := v.Args[1] 20724 if or.Op != OpAMD64ORL { 20725 break 20726 } 20727 _ = or.Args[1] 20728 s1 := or.Args[0] 20729 if s1.Op != OpAMD64SHLLconst { 20730 break 20731 } 20732 j1 := s1.AuxInt 20733 x1 := s1.Args[0] 20734 if x1.Op != OpAMD64MOVBloadidx1 { 20735 break 20736 } 20737 i1 := x1.AuxInt 20738 if x1.Aux != s { 20739 break 20740 } 20741 _ = x1.Args[2] 20742 if idx != x1.Args[0] { 20743 break 20744 } 20745 if p != x1.Args[1] { 20746 break 20747 } 20748 if mem != x1.Args[2] { 20749 break 20750 } 20751 y := or.Args[1] 20752 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20753 break 20754 } 20755 b = mergePoint(b, x0, x1) 20756 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20757 v.reset(OpCopy) 20758 v.AddArg(v0) 20759 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20760 v1.AuxInt = j1 20761 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20762 v2.AuxInt = 8 20763 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20764 v3.AuxInt = i0 20765 v3.Aux = s 20766 v3.AddArg(p) 20767 v3.AddArg(idx) 20768 v3.AddArg(mem) 20769 v2.AddArg(v3) 20770 v1.AddArg(v2) 20771 v0.AddArg(v1) 20772 v0.AddArg(y) 20773 return true 20774 } 20775 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 20776 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20777 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 20778 for { 20779 _ = v.Args[1] 20780 s0 := v.Args[0] 20781 if s0.Op != OpAMD64SHLLconst { 20782 break 20783 } 20784 j0 := s0.AuxInt 20785 x0 := s0.Args[0] 20786 if x0.Op != OpAMD64MOVBloadidx1 { 20787 break 20788 } 20789 i0 := x0.AuxInt 20790 s := x0.Aux 20791 _ = x0.Args[2] 20792 idx := x0.Args[0] 20793 p := x0.Args[1] 20794 mem := x0.Args[2] 20795 or := v.Args[1] 20796 if or.Op != OpAMD64ORL { 20797 break 20798 } 20799 _ = or.Args[1] 20800 s1 := or.Args[0] 20801 if s1.Op != OpAMD64SHLLconst { 20802 break 20803 } 20804 j1 := s1.AuxInt 20805 x1 := s1.Args[0] 20806 if x1.Op != OpAMD64MOVBloadidx1 { 20807 break 20808 } 20809 i1 := x1.AuxInt 20810 if x1.Aux != s { 20811 break 20812 } 20813 _ = x1.Args[2] 20814 if idx != x1.Args[0] { 20815 break 20816 } 20817 if p != x1.Args[1] { 20818 break 20819 } 20820 if mem != x1.Args[2] { 20821 break 20822 } 20823 y := or.Args[1] 20824 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 
1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20825 break 20826 } 20827 b = mergePoint(b, x0, x1) 20828 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20829 v.reset(OpCopy) 20830 v.AddArg(v0) 20831 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20832 v1.AuxInt = j1 20833 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20834 v2.AuxInt = 8 20835 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20836 v3.AuxInt = i0 20837 v3.Aux = s 20838 v3.AddArg(p) 20839 v3.AddArg(idx) 20840 v3.AddArg(mem) 20841 v2.AddArg(v3) 20842 v1.AddArg(v2) 20843 v0.AddArg(v1) 20844 v0.AddArg(y) 20845 return true 20846 } 20847 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 20848 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20849 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 20850 for { 20851 _ = v.Args[1] 20852 s0 := v.Args[0] 20853 if s0.Op != OpAMD64SHLLconst { 20854 break 20855 } 20856 j0 := s0.AuxInt 20857 x0 := s0.Args[0] 20858 if x0.Op != OpAMD64MOVBloadidx1 { 20859 break 20860 } 20861 i0 := x0.AuxInt 20862 s := x0.Aux 20863 _ = x0.Args[2] 20864 p := x0.Args[0] 20865 idx := x0.Args[1] 20866 mem := x0.Args[2] 20867 or := v.Args[1] 20868 if or.Op != OpAMD64ORL { 20869 break 20870 } 20871 _ = or.Args[1] 20872 y := or.Args[0] 20873 s1 := or.Args[1] 20874 if s1.Op != OpAMD64SHLLconst { 20875 break 20876 } 20877 j1 := s1.AuxInt 20878 x1 := s1.Args[0] 20879 if x1.Op != OpAMD64MOVBloadidx1 { 20880 break 20881 } 20882 i1 := x1.AuxInt 20883 if x1.Aux != s { 20884 break 20885 } 20886 _ = x1.Args[2] 20887 if p != x1.Args[0] { 20888 break 20889 } 20890 if idx != x1.Args[1] { 20891 break 20892 } 20893 if mem != x1.Args[2] { 20894 break 20895 } 20896 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20897 break 20898 } 20899 b = mergePoint(b, x0, x1) 20900 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20901 v.reset(OpCopy) 20902 v.AddArg(v0) 20903 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20904 v1.AuxInt = j1 20905 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20906 v2.AuxInt = 8 20907 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20908 v3.AuxInt = i0 20909 v3.Aux = s 20910 v3.AddArg(p) 20911 v3.AddArg(idx) 20912 v3.AddArg(mem) 20913 v2.AddArg(v3) 20914 v1.AddArg(v2) 20915 v0.AddArg(v1) 20916 v0.AddArg(y) 20917 return true 20918 } 20919 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 20920 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20921 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 20922 for { 20923 _ = v.Args[1] 20924 s0 := v.Args[0] 20925 if s0.Op != OpAMD64SHLLconst { 20926 break 20927 } 20928 
j0 := s0.AuxInt 20929 x0 := s0.Args[0] 20930 if x0.Op != OpAMD64MOVBloadidx1 { 20931 break 20932 } 20933 i0 := x0.AuxInt 20934 s := x0.Aux 20935 _ = x0.Args[2] 20936 idx := x0.Args[0] 20937 p := x0.Args[1] 20938 mem := x0.Args[2] 20939 or := v.Args[1] 20940 if or.Op != OpAMD64ORL { 20941 break 20942 } 20943 _ = or.Args[1] 20944 y := or.Args[0] 20945 s1 := or.Args[1] 20946 if s1.Op != OpAMD64SHLLconst { 20947 break 20948 } 20949 j1 := s1.AuxInt 20950 x1 := s1.Args[0] 20951 if x1.Op != OpAMD64MOVBloadidx1 { 20952 break 20953 } 20954 i1 := x1.AuxInt 20955 if x1.Aux != s { 20956 break 20957 } 20958 _ = x1.Args[2] 20959 if p != x1.Args[0] { 20960 break 20961 } 20962 if idx != x1.Args[1] { 20963 break 20964 } 20965 if mem != x1.Args[2] { 20966 break 20967 } 20968 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20969 break 20970 } 20971 b = mergePoint(b, x0, x1) 20972 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20973 v.reset(OpCopy) 20974 v.AddArg(v0) 20975 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20976 v1.AuxInt = j1 20977 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 20978 v2.AuxInt = 8 20979 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20980 v3.AuxInt = i0 20981 v3.Aux = s 20982 v3.AddArg(p) 20983 v3.AddArg(idx) 20984 v3.AddArg(mem) 20985 v2.AddArg(v3) 20986 v1.AddArg(v2) 20987 v0.AddArg(v1) 20988 v0.AddArg(y) 20989 return true 20990 } 20991 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 20992 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20993 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 20994 for { 20995 _ = v.Args[1] 20996 s0 := v.Args[0] 20997 if s0.Op != OpAMD64SHLLconst { 20998 break 20999 } 21000 j0 := s0.AuxInt 21001 x0 := s0.Args[0] 21002 if x0.Op != OpAMD64MOVBloadidx1 { 21003 break 21004 } 21005 i0 := x0.AuxInt 21006 s := x0.Aux 21007 _ = x0.Args[2] 21008 p := x0.Args[0] 21009 idx := x0.Args[1] 21010 mem := x0.Args[2] 21011 or := v.Args[1] 21012 if or.Op != OpAMD64ORL { 21013 break 21014 } 21015 _ = or.Args[1] 21016 y := or.Args[0] 21017 s1 := or.Args[1] 21018 if s1.Op != OpAMD64SHLLconst { 21019 break 21020 } 21021 j1 := s1.AuxInt 21022 x1 := s1.Args[0] 21023 if x1.Op != OpAMD64MOVBloadidx1 { 21024 break 21025 } 21026 i1 := x1.AuxInt 21027 if x1.Aux != s { 21028 break 21029 } 21030 _ = x1.Args[2] 21031 if idx != x1.Args[0] { 21032 break 21033 } 21034 if p != x1.Args[1] { 21035 break 21036 } 21037 if mem != x1.Args[2] { 21038 break 21039 } 21040 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21041 break 21042 } 21043 b = mergePoint(b, x0, x1) 21044 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21045 v.reset(OpCopy) 21046 v.AddArg(v0) 21047 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21048 v1.AuxInt = j1 21049 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21050 v2.AuxInt = 8 21051 v3 := 
b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21052 v3.AuxInt = i0 21053 v3.Aux = s 21054 v3.AddArg(p) 21055 v3.AddArg(idx) 21056 v3.AddArg(mem) 21057 v2.AddArg(v3) 21058 v1.AddArg(v2) 21059 v0.AddArg(v1) 21060 v0.AddArg(y) 21061 return true 21062 } 21063 return false 21064 } 21065 func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool { 21066 b := v.Block 21067 _ = b 21068 typ := &b.Func.Config.Types 21069 _ = typ 21070 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 21071 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21072 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21073 for { 21074 _ = v.Args[1] 21075 s0 := v.Args[0] 21076 if s0.Op != OpAMD64SHLLconst { 21077 break 21078 } 21079 j0 := s0.AuxInt 21080 x0 := s0.Args[0] 21081 if x0.Op != OpAMD64MOVBloadidx1 { 21082 break 21083 } 21084 i0 := x0.AuxInt 21085 s := x0.Aux 21086 _ = x0.Args[2] 21087 idx := x0.Args[0] 21088 p := x0.Args[1] 21089 mem := x0.Args[2] 21090 or := v.Args[1] 21091 if or.Op != OpAMD64ORL { 21092 break 21093 } 21094 _ = or.Args[1] 21095 y := or.Args[0] 21096 s1 := or.Args[1] 21097 if s1.Op != OpAMD64SHLLconst { 21098 break 21099 } 21100 j1 := s1.AuxInt 21101 x1 := s1.Args[0] 21102 if x1.Op != OpAMD64MOVBloadidx1 { 21103 break 21104 } 21105 i1 := x1.AuxInt 21106 if x1.Aux != s { 21107 break 21108 } 21109 _ = x1.Args[2] 21110 if idx != x1.Args[0] { 21111 break 21112 } 21113 if p != x1.Args[1] { 21114 break 21115 } 21116 if mem != x1.Args[2] { 21117 break 21118 } 21119 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21120 break 21121 } 21122 b = mergePoint(b, x0, x1) 21123 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21124 v.reset(OpCopy) 21125 v.AddArg(v0) 21126 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21127 v1.AuxInt = j1 21128 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21129 v2.AuxInt = 8 21130 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21131 v3.AuxInt = i0 21132 v3.Aux = s 21133 v3.AddArg(p) 21134 v3.AddArg(idx) 21135 v3.AddArg(mem) 21136 v2.AddArg(v3) 21137 v1.AddArg(v2) 21138 v0.AddArg(v1) 21139 v0.AddArg(y) 21140 return true 21141 } 21142 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 21143 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21144 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21145 for { 21146 _ = v.Args[1] 21147 or := v.Args[0] 21148 if or.Op != OpAMD64ORL { 21149 break 21150 } 21151 _ = or.Args[1] 21152 s1 := or.Args[0] 21153 if s1.Op != OpAMD64SHLLconst { 21154 break 21155 } 21156 j1 := s1.AuxInt 21157 x1 := s1.Args[0] 21158 if x1.Op != OpAMD64MOVBloadidx1 { 21159 break 21160 } 21161 i1 := x1.AuxInt 21162 s := x1.Aux 21163 _ = x1.Args[2] 21164 
p := x1.Args[0] 21165 idx := x1.Args[1] 21166 mem := x1.Args[2] 21167 y := or.Args[1] 21168 s0 := v.Args[1] 21169 if s0.Op != OpAMD64SHLLconst { 21170 break 21171 } 21172 j0 := s0.AuxInt 21173 x0 := s0.Args[0] 21174 if x0.Op != OpAMD64MOVBloadidx1 { 21175 break 21176 } 21177 i0 := x0.AuxInt 21178 if x0.Aux != s { 21179 break 21180 } 21181 _ = x0.Args[2] 21182 if p != x0.Args[0] { 21183 break 21184 } 21185 if idx != x0.Args[1] { 21186 break 21187 } 21188 if mem != x0.Args[2] { 21189 break 21190 } 21191 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21192 break 21193 } 21194 b = mergePoint(b, x0, x1) 21195 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21196 v.reset(OpCopy) 21197 v.AddArg(v0) 21198 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21199 v1.AuxInt = j1 21200 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21201 v2.AuxInt = 8 21202 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21203 v3.AuxInt = i0 21204 v3.Aux = s 21205 v3.AddArg(p) 21206 v3.AddArg(idx) 21207 v3.AddArg(mem) 21208 v2.AddArg(v3) 21209 v1.AddArg(v2) 21210 v0.AddArg(v1) 21211 v0.AddArg(y) 21212 return true 21213 } 21214 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 21215 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21216 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21217 for { 21218 _ = v.Args[1] 21219 or := v.Args[0] 21220 if or.Op != OpAMD64ORL { 21221 break 21222 } 21223 _ = or.Args[1] 21224 s1 := or.Args[0] 21225 if s1.Op != OpAMD64SHLLconst { 21226 break 21227 } 21228 j1 := s1.AuxInt 21229 x1 := s1.Args[0] 21230 if x1.Op != OpAMD64MOVBloadidx1 { 21231 break 21232 } 21233 i1 := x1.AuxInt 21234 s := x1.Aux 21235 _ = x1.Args[2] 21236 idx := x1.Args[0] 21237 p := x1.Args[1] 21238 mem := x1.Args[2] 21239 y := or.Args[1] 21240 s0 := v.Args[1] 21241 if s0.Op != OpAMD64SHLLconst { 21242 break 21243 } 21244 j0 := s0.AuxInt 21245 x0 := s0.Args[0] 21246 if x0.Op != OpAMD64MOVBloadidx1 { 21247 break 21248 } 21249 i0 := x0.AuxInt 21250 if x0.Aux != s { 21251 break 21252 } 21253 _ = x0.Args[2] 21254 if p != x0.Args[0] { 21255 break 21256 } 21257 if idx != x0.Args[1] { 21258 break 21259 } 21260 if mem != x0.Args[2] { 21261 break 21262 } 21263 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21264 break 21265 } 21266 b = mergePoint(b, x0, x1) 21267 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21268 v.reset(OpCopy) 21269 v.AddArg(v0) 21270 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21271 v1.AuxInt = j1 21272 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21273 v2.AuxInt = 8 21274 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21275 v3.AuxInt = i0 21276 v3.Aux = s 21277 v3.AddArg(p) 21278 v3.AddArg(idx) 21279 v3.AddArg(mem) 21280 v2.AddArg(v3) 21281 v1.AddArg(v2) 21282 v0.AddArg(v1) 21283 v0.AddArg(y) 21284 return true 21285 } 21286 // match: (ORL 
or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 21287 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21288 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21289 for { 21290 _ = v.Args[1] 21291 or := v.Args[0] 21292 if or.Op != OpAMD64ORL { 21293 break 21294 } 21295 _ = or.Args[1] 21296 y := or.Args[0] 21297 s1 := or.Args[1] 21298 if s1.Op != OpAMD64SHLLconst { 21299 break 21300 } 21301 j1 := s1.AuxInt 21302 x1 := s1.Args[0] 21303 if x1.Op != OpAMD64MOVBloadidx1 { 21304 break 21305 } 21306 i1 := x1.AuxInt 21307 s := x1.Aux 21308 _ = x1.Args[2] 21309 p := x1.Args[0] 21310 idx := x1.Args[1] 21311 mem := x1.Args[2] 21312 s0 := v.Args[1] 21313 if s0.Op != OpAMD64SHLLconst { 21314 break 21315 } 21316 j0 := s0.AuxInt 21317 x0 := s0.Args[0] 21318 if x0.Op != OpAMD64MOVBloadidx1 { 21319 break 21320 } 21321 i0 := x0.AuxInt 21322 if x0.Aux != s { 21323 break 21324 } 21325 _ = x0.Args[2] 21326 if p != x0.Args[0] { 21327 break 21328 } 21329 if idx != x0.Args[1] { 21330 break 21331 } 21332 if mem != x0.Args[2] { 21333 break 21334 } 21335 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21336 break 21337 } 21338 b = mergePoint(b, x0, x1) 21339 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21340 v.reset(OpCopy) 21341 v.AddArg(v0) 21342 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21343 v1.AuxInt = j1 21344 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21345 v2.AuxInt = 8 21346 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21347 v3.AuxInt = i0 21348 v3.Aux = s 21349 v3.AddArg(p) 21350 v3.AddArg(idx) 21351 v3.AddArg(mem) 21352 v2.AddArg(v3) 21353 v1.AddArg(v2) 21354 v0.AddArg(v1) 21355 v0.AddArg(y) 21356 return true 21357 } 21358 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 21359 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21360 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21361 for { 21362 _ = v.Args[1] 21363 or := v.Args[0] 21364 if or.Op != OpAMD64ORL { 21365 break 21366 } 21367 _ = or.Args[1] 21368 y := or.Args[0] 21369 s1 := or.Args[1] 21370 if s1.Op != OpAMD64SHLLconst { 21371 break 21372 } 21373 j1 := s1.AuxInt 21374 x1 := s1.Args[0] 21375 if x1.Op != OpAMD64MOVBloadidx1 { 21376 break 21377 } 21378 i1 := x1.AuxInt 21379 s := x1.Aux 21380 _ = x1.Args[2] 21381 idx := x1.Args[0] 21382 p := x1.Args[1] 21383 mem := x1.Args[2] 21384 s0 := v.Args[1] 21385 if s0.Op != OpAMD64SHLLconst { 21386 break 21387 } 21388 j0 := s0.AuxInt 21389 x0 := s0.Args[0] 21390 if x0.Op != OpAMD64MOVBloadidx1 { 21391 break 21392 } 21393 i0 := x0.AuxInt 21394 if x0.Aux != s { 21395 break 21396 } 21397 _ = x0.Args[2] 21398 if p != x0.Args[0] { 21399 break 21400 } 21401 if idx != x0.Args[1] { 21402 break 
21403 } 21404 if mem != x0.Args[2] { 21405 break 21406 } 21407 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21408 break 21409 } 21410 b = mergePoint(b, x0, x1) 21411 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21412 v.reset(OpCopy) 21413 v.AddArg(v0) 21414 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21415 v1.AuxInt = j1 21416 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21417 v2.AuxInt = 8 21418 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21419 v3.AuxInt = i0 21420 v3.Aux = s 21421 v3.AddArg(p) 21422 v3.AddArg(idx) 21423 v3.AddArg(mem) 21424 v2.AddArg(v3) 21425 v1.AddArg(v2) 21426 v0.AddArg(v1) 21427 v0.AddArg(y) 21428 return true 21429 } 21430 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 21431 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21432 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21433 for { 21434 _ = v.Args[1] 21435 or := v.Args[0] 21436 if or.Op != OpAMD64ORL { 21437 break 21438 } 21439 _ = or.Args[1] 21440 s1 := or.Args[0] 21441 if s1.Op != OpAMD64SHLLconst { 21442 break 21443 } 21444 j1 := s1.AuxInt 21445 x1 := s1.Args[0] 21446 if x1.Op != OpAMD64MOVBloadidx1 { 21447 break 21448 } 21449 i1 := x1.AuxInt 21450 s := x1.Aux 21451 _ = x1.Args[2] 21452 p := x1.Args[0] 21453 idx := x1.Args[1] 21454 mem := x1.Args[2] 21455 y := or.Args[1] 21456 s0 := v.Args[1] 21457 if s0.Op != OpAMD64SHLLconst { 21458 break 21459 } 21460 j0 := s0.AuxInt 21461 x0 := s0.Args[0] 21462 if x0.Op != OpAMD64MOVBloadidx1 { 21463 break 21464 } 21465 i0 := x0.AuxInt 21466 if x0.Aux != s { 21467 break 21468 } 21469 _ = x0.Args[2] 21470 if idx != x0.Args[0] { 21471 break 21472 } 21473 if p != x0.Args[1] { 21474 break 21475 } 21476 if mem != x0.Args[2] { 21477 break 21478 } 21479 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21480 break 21481 } 21482 b = mergePoint(b, x0, x1) 21483 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21484 v.reset(OpCopy) 21485 v.AddArg(v0) 21486 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21487 v1.AuxInt = j1 21488 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21489 v2.AuxInt = 8 21490 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21491 v3.AuxInt = i0 21492 v3.Aux = s 21493 v3.AddArg(p) 21494 v3.AddArg(idx) 21495 v3.AddArg(mem) 21496 v2.AddArg(v3) 21497 v1.AddArg(v2) 21498 v0.AddArg(v1) 21499 v0.AddArg(y) 21500 return true 21501 } 21502 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 21503 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21504 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] 
(ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21505 for { 21506 _ = v.Args[1] 21507 or := v.Args[0] 21508 if or.Op != OpAMD64ORL { 21509 break 21510 } 21511 _ = or.Args[1] 21512 s1 := or.Args[0] 21513 if s1.Op != OpAMD64SHLLconst { 21514 break 21515 } 21516 j1 := s1.AuxInt 21517 x1 := s1.Args[0] 21518 if x1.Op != OpAMD64MOVBloadidx1 { 21519 break 21520 } 21521 i1 := x1.AuxInt 21522 s := x1.Aux 21523 _ = x1.Args[2] 21524 idx := x1.Args[0] 21525 p := x1.Args[1] 21526 mem := x1.Args[2] 21527 y := or.Args[1] 21528 s0 := v.Args[1] 21529 if s0.Op != OpAMD64SHLLconst { 21530 break 21531 } 21532 j0 := s0.AuxInt 21533 x0 := s0.Args[0] 21534 if x0.Op != OpAMD64MOVBloadidx1 { 21535 break 21536 } 21537 i0 := x0.AuxInt 21538 if x0.Aux != s { 21539 break 21540 } 21541 _ = x0.Args[2] 21542 if idx != x0.Args[0] { 21543 break 21544 } 21545 if p != x0.Args[1] { 21546 break 21547 } 21548 if mem != x0.Args[2] { 21549 break 21550 } 21551 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21552 break 21553 } 21554 b = mergePoint(b, x0, x1) 21555 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21556 v.reset(OpCopy) 21557 v.AddArg(v0) 21558 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21559 v1.AuxInt = j1 21560 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21561 v2.AuxInt = 8 21562 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21563 v3.AuxInt = i0 21564 v3.Aux = s 21565 v3.AddArg(p) 21566 v3.AddArg(idx) 21567 v3.AddArg(mem) 21568 v2.AddArg(v3) 21569 v1.AddArg(v2) 21570 v0.AddArg(v1) 21571 v0.AddArg(y) 21572 return true 21573 } 21574 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 21575 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21576 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21577 for { 21578 _ = v.Args[1] 21579 or := v.Args[0] 21580 if or.Op != OpAMD64ORL { 21581 break 21582 } 21583 _ = or.Args[1] 21584 y := or.Args[0] 21585 s1 := or.Args[1] 21586 if s1.Op != OpAMD64SHLLconst { 21587 break 21588 } 21589 j1 := s1.AuxInt 21590 x1 := s1.Args[0] 21591 if x1.Op != OpAMD64MOVBloadidx1 { 21592 break 21593 } 21594 i1 := x1.AuxInt 21595 s := x1.Aux 21596 _ = x1.Args[2] 21597 p := x1.Args[0] 21598 idx := x1.Args[1] 21599 mem := x1.Args[2] 21600 s0 := v.Args[1] 21601 if s0.Op != OpAMD64SHLLconst { 21602 break 21603 } 21604 j0 := s0.AuxInt 21605 x0 := s0.Args[0] 21606 if x0.Op != OpAMD64MOVBloadidx1 { 21607 break 21608 } 21609 i0 := x0.AuxInt 21610 if x0.Aux != s { 21611 break 21612 } 21613 _ = x0.Args[2] 21614 if idx != x0.Args[0] { 21615 break 21616 } 21617 if p != x0.Args[1] { 21618 break 21619 } 21620 if mem != x0.Args[2] { 21621 break 21622 } 21623 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21624 break 21625 } 21626 b = mergePoint(b, x0, x1) 21627 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21628 v.reset(OpCopy) 21629 v.AddArg(v0) 
21630 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21631 v1.AuxInt = j1 21632 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21633 v2.AuxInt = 8 21634 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21635 v3.AuxInt = i0 21636 v3.Aux = s 21637 v3.AddArg(p) 21638 v3.AddArg(idx) 21639 v3.AddArg(mem) 21640 v2.AddArg(v3) 21641 v1.AddArg(v2) 21642 v0.AddArg(v1) 21643 v0.AddArg(y) 21644 return true 21645 } 21646 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 21647 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 21648 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 21649 for { 21650 _ = v.Args[1] 21651 or := v.Args[0] 21652 if or.Op != OpAMD64ORL { 21653 break 21654 } 21655 _ = or.Args[1] 21656 y := or.Args[0] 21657 s1 := or.Args[1] 21658 if s1.Op != OpAMD64SHLLconst { 21659 break 21660 } 21661 j1 := s1.AuxInt 21662 x1 := s1.Args[0] 21663 if x1.Op != OpAMD64MOVBloadidx1 { 21664 break 21665 } 21666 i1 := x1.AuxInt 21667 s := x1.Aux 21668 _ = x1.Args[2] 21669 idx := x1.Args[0] 21670 p := x1.Args[1] 21671 mem := x1.Args[2] 21672 s0 := v.Args[1] 21673 if s0.Op != OpAMD64SHLLconst { 21674 break 21675 } 21676 j0 := s0.AuxInt 21677 x0 := s0.Args[0] 21678 if x0.Op != OpAMD64MOVBloadidx1 { 21679 break 21680 } 21681 i0 := x0.AuxInt 21682 if x0.Aux != s { 21683 break 21684 } 21685 _ = x0.Args[2] 21686 if idx != x0.Args[0] { 21687 break 21688 } 21689 if p != x0.Args[1] { 21690 break 21691 } 21692 if mem != x0.Args[2] { 21693 break 21694 } 21695 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 21696 break 21697 } 21698 b = mergePoint(b, x0, x1) 21699 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 21700 v.reset(OpCopy) 21701 v.AddArg(v0) 21702 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 21703 v1.AuxInt = j1 21704 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 21705 v2.AuxInt = 8 21706 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21707 v3.AuxInt = i0 21708 v3.Aux = s 21709 v3.AddArg(p) 21710 v3.AddArg(idx) 21711 v3.AddArg(mem) 21712 v2.AddArg(v3) 21713 v1.AddArg(v2) 21714 v0.AddArg(v1) 21715 v0.AddArg(y) 21716 return true 21717 } 21718 // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) 21719 // cond: canMergeLoad(v, l, x) && clobber(l) 21720 // result: (ORLmem x [off] {sym} ptr mem) 21721 for { 21722 _ = v.Args[1] 21723 x := v.Args[0] 21724 l := v.Args[1] 21725 if l.Op != OpAMD64MOVLload { 21726 break 21727 } 21728 off := l.AuxInt 21729 sym := l.Aux 21730 _ = l.Args[1] 21731 ptr := l.Args[0] 21732 mem := l.Args[1] 21733 if !(canMergeLoad(v, l, x) && clobber(l)) { 21734 break 21735 } 21736 v.reset(OpAMD64ORLmem) 21737 v.AuxInt = off 21738 v.Aux = sym 21739 v.AddArg(x) 21740 v.AddArg(ptr) 21741 v.AddArg(mem) 21742 return true 21743 } 21744 return false 21745 } 21746 func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { 21747 // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) 21748 // cond: canMergeLoad(v, l, x) && clobber(l) 21749 // result: (ORLmem x [off] {sym} ptr mem) 21750 for { 21751 _ = v.Args[1] 21752 l := 
v.Args[0] 21753 if l.Op != OpAMD64MOVLload { 21754 break 21755 } 21756 off := l.AuxInt 21757 sym := l.Aux 21758 _ = l.Args[1] 21759 ptr := l.Args[0] 21760 mem := l.Args[1] 21761 x := v.Args[1] 21762 if !(canMergeLoad(v, l, x) && clobber(l)) { 21763 break 21764 } 21765 v.reset(OpAMD64ORLmem) 21766 v.AuxInt = off 21767 v.Aux = sym 21768 v.AddArg(x) 21769 v.AddArg(ptr) 21770 v.AddArg(mem) 21771 return true 21772 } 21773 return false 21774 } 21775 func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { 21776 // match: (ORLconst [c] x) 21777 // cond: int32(c)==0 21778 // result: x 21779 for { 21780 c := v.AuxInt 21781 x := v.Args[0] 21782 if !(int32(c) == 0) { 21783 break 21784 } 21785 v.reset(OpCopy) 21786 v.Type = x.Type 21787 v.AddArg(x) 21788 return true 21789 } 21790 // match: (ORLconst [c] _) 21791 // cond: int32(c)==-1 21792 // result: (MOVLconst [-1]) 21793 for { 21794 c := v.AuxInt 21795 if !(int32(c) == -1) { 21796 break 21797 } 21798 v.reset(OpAMD64MOVLconst) 21799 v.AuxInt = -1 21800 return true 21801 } 21802 // match: (ORLconst [c] (MOVLconst [d])) 21803 // cond: 21804 // result: (MOVLconst [c|d]) 21805 for { 21806 c := v.AuxInt 21807 v_0 := v.Args[0] 21808 if v_0.Op != OpAMD64MOVLconst { 21809 break 21810 } 21811 d := v_0.AuxInt 21812 v.reset(OpAMD64MOVLconst) 21813 v.AuxInt = c | d 21814 return true 21815 } 21816 return false 21817 } 21818 func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { 21819 // match: (ORQ x (MOVQconst [c])) 21820 // cond: is32Bit(c) 21821 // result: (ORQconst [c] x) 21822 for { 21823 _ = v.Args[1] 21824 x := v.Args[0] 21825 v_1 := v.Args[1] 21826 if v_1.Op != OpAMD64MOVQconst { 21827 break 21828 } 21829 c := v_1.AuxInt 21830 if !(is32Bit(c)) { 21831 break 21832 } 21833 v.reset(OpAMD64ORQconst) 21834 v.AuxInt = c 21835 v.AddArg(x) 21836 return true 21837 } 21838 // match: (ORQ (MOVQconst [c]) x) 21839 // cond: is32Bit(c) 21840 // result: (ORQconst [c] x) 21841 for { 21842 _ = v.Args[1] 21843 v_0 := v.Args[0] 21844 if v_0.Op != OpAMD64MOVQconst { 21845 break 21846 } 21847 c := v_0.AuxInt 21848 x := v.Args[1] 21849 if !(is32Bit(c)) { 21850 break 21851 } 21852 v.reset(OpAMD64ORQconst) 21853 v.AuxInt = c 21854 v.AddArg(x) 21855 return true 21856 } 21857 // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) 21858 // cond: d==64-c 21859 // result: (ROLQconst x [c]) 21860 for { 21861 _ = v.Args[1] 21862 v_0 := v.Args[0] 21863 if v_0.Op != OpAMD64SHLQconst { 21864 break 21865 } 21866 c := v_0.AuxInt 21867 x := v_0.Args[0] 21868 v_1 := v.Args[1] 21869 if v_1.Op != OpAMD64SHRQconst { 21870 break 21871 } 21872 d := v_1.AuxInt 21873 if x != v_1.Args[0] { 21874 break 21875 } 21876 if !(d == 64-c) { 21877 break 21878 } 21879 v.reset(OpAMD64ROLQconst) 21880 v.AuxInt = c 21881 v.AddArg(x) 21882 return true 21883 } 21884 // match: (ORQ (SHRQconst x [d]) (SHLQconst x [c])) 21885 // cond: d==64-c 21886 // result: (ROLQconst x [c]) 21887 for { 21888 _ = v.Args[1] 21889 v_0 := v.Args[0] 21890 if v_0.Op != OpAMD64SHRQconst { 21891 break 21892 } 21893 d := v_0.AuxInt 21894 x := v_0.Args[0] 21895 v_1 := v.Args[1] 21896 if v_1.Op != OpAMD64SHLQconst { 21897 break 21898 } 21899 c := v_1.AuxInt 21900 if x != v_1.Args[0] { 21901 break 21902 } 21903 if !(d == 64-c) { 21904 break 21905 } 21906 v.reset(OpAMD64ROLQconst) 21907 v.AuxInt = c 21908 v.AddArg(x) 21909 return true 21910 } 21911 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 21912 // cond: 21913 // result: (ROLQ x y) 21914 for { 21915 _ = v.Args[1] 
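// The two constant-shift cases just above rewrite x<<c | x>>(64-c) into a
// single ROLQconst. The cases that follow handle a variable rotate count:
// the SBBQcarrymask (CMPQconst ... [64]) subtree is an all-ones-or-zero
// mask that zeroes the wrap-around SHRQ term when y&63 == 0, keeping the
// Go shift semantics (a shift by >= 64 yields 0) intact. A hedged sketch
// of Go code that can lower to this shape (illustrative only):
//
//	func rol64(x uint64, c uint) uint64 {
//		// 64-c&63 parses as 64-(c&63) and may reach 64, which is why
//		// the lowering guards the right shift with the carry mask
//		return x<<(c&63) | x>>(64-c&63)
//	}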
21916 v_0 := v.Args[0] 21917 if v_0.Op != OpAMD64SHLQ { 21918 break 21919 } 21920 _ = v_0.Args[1] 21921 x := v_0.Args[0] 21922 y := v_0.Args[1] 21923 v_1 := v.Args[1] 21924 if v_1.Op != OpAMD64ANDQ { 21925 break 21926 } 21927 _ = v_1.Args[1] 21928 v_1_0 := v_1.Args[0] 21929 if v_1_0.Op != OpAMD64SHRQ { 21930 break 21931 } 21932 _ = v_1_0.Args[1] 21933 if x != v_1_0.Args[0] { 21934 break 21935 } 21936 v_1_0_1 := v_1_0.Args[1] 21937 if v_1_0_1.Op != OpAMD64NEGQ { 21938 break 21939 } 21940 if y != v_1_0_1.Args[0] { 21941 break 21942 } 21943 v_1_1 := v_1.Args[1] 21944 if v_1_1.Op != OpAMD64SBBQcarrymask { 21945 break 21946 } 21947 v_1_1_0 := v_1_1.Args[0] 21948 if v_1_1_0.Op != OpAMD64CMPQconst { 21949 break 21950 } 21951 if v_1_1_0.AuxInt != 64 { 21952 break 21953 } 21954 v_1_1_0_0 := v_1_1_0.Args[0] 21955 if v_1_1_0_0.Op != OpAMD64NEGQ { 21956 break 21957 } 21958 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 21959 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 21960 break 21961 } 21962 if v_1_1_0_0_0.AuxInt != -64 { 21963 break 21964 } 21965 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 21966 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 21967 break 21968 } 21969 if v_1_1_0_0_0_0.AuxInt != 63 { 21970 break 21971 } 21972 if y != v_1_1_0_0_0_0.Args[0] { 21973 break 21974 } 21975 v.reset(OpAMD64ROLQ) 21976 v.AddArg(x) 21977 v.AddArg(y) 21978 return true 21979 } 21980 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y)))) 21981 // cond: 21982 // result: (ROLQ x y) 21983 for { 21984 _ = v.Args[1] 21985 v_0 := v.Args[0] 21986 if v_0.Op != OpAMD64SHLQ { 21987 break 21988 } 21989 _ = v_0.Args[1] 21990 x := v_0.Args[0] 21991 y := v_0.Args[1] 21992 v_1 := v.Args[1] 21993 if v_1.Op != OpAMD64ANDQ { 21994 break 21995 } 21996 _ = v_1.Args[1] 21997 v_1_0 := v_1.Args[0] 21998 if v_1_0.Op != OpAMD64SBBQcarrymask { 21999 break 22000 } 22001 v_1_0_0 := v_1_0.Args[0] 22002 if v_1_0_0.Op != OpAMD64CMPQconst { 22003 break 22004 } 22005 if v_1_0_0.AuxInt != 64 { 22006 break 22007 } 22008 v_1_0_0_0 := v_1_0_0.Args[0] 22009 if v_1_0_0_0.Op != OpAMD64NEGQ { 22010 break 22011 } 22012 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 22013 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 22014 break 22015 } 22016 if v_1_0_0_0_0.AuxInt != -64 { 22017 break 22018 } 22019 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 22020 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 22021 break 22022 } 22023 if v_1_0_0_0_0_0.AuxInt != 63 { 22024 break 22025 } 22026 if y != v_1_0_0_0_0_0.Args[0] { 22027 break 22028 } 22029 v_1_1 := v_1.Args[1] 22030 if v_1_1.Op != OpAMD64SHRQ { 22031 break 22032 } 22033 _ = v_1_1.Args[1] 22034 if x != v_1_1.Args[0] { 22035 break 22036 } 22037 v_1_1_1 := v_1_1.Args[1] 22038 if v_1_1_1.Op != OpAMD64NEGQ { 22039 break 22040 } 22041 if y != v_1_1_1.Args[0] { 22042 break 22043 } 22044 v.reset(OpAMD64ROLQ) 22045 v.AddArg(x) 22046 v.AddArg(y) 22047 return true 22048 } 22049 // match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y)) 22050 // cond: 22051 // result: (ROLQ x y) 22052 for { 22053 _ = v.Args[1] 22054 v_0 := v.Args[0] 22055 if v_0.Op != OpAMD64ANDQ { 22056 break 22057 } 22058 _ = v_0.Args[1] 22059 v_0_0 := v_0.Args[0] 22060 if v_0_0.Op != OpAMD64SHRQ { 22061 break 22062 } 22063 _ = v_0_0.Args[1] 22064 x := v_0_0.Args[0] 22065 v_0_0_1 := v_0_0.Args[1] 22066 if v_0_0_1.Op != OpAMD64NEGQ { 22067 break 22068 } 22069 y := v_0_0_1.Args[0] 22070 v_0_1 := v_0.Args[1] 22071 if v_0_1.Op != OpAMD64SBBQcarrymask { 22072 break 22073 } 22074 
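// This case and its neighbors are the same ROLQ rewrite; they differ only
// in the syntactic order of the commutative ORQ and ANDQ operands. The
// matcher is purely structural, so each commutative rule appears as one
// case per operand permutation, schematically:
//
//	(ORQ (SHLQ x y) (ANDQ shr mask))
//	(ORQ (SHLQ x y) (ANDQ mask shr))
//	(ORQ (ANDQ shr mask) (SHLQ x y))
//	(ORQ (ANDQ mask shr) (SHLQ x y))
//
// where shr is the wrap-around (SHRQ x (NEGQ y)) term and mask is the
// SBBQcarrymask subtree; all four reduce to (ROLQ x y).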
v_0_1_0 := v_0_1.Args[0] 22075 if v_0_1_0.Op != OpAMD64CMPQconst { 22076 break 22077 } 22078 if v_0_1_0.AuxInt != 64 { 22079 break 22080 } 22081 v_0_1_0_0 := v_0_1_0.Args[0] 22082 if v_0_1_0_0.Op != OpAMD64NEGQ { 22083 break 22084 } 22085 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 22086 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 22087 break 22088 } 22089 if v_0_1_0_0_0.AuxInt != -64 { 22090 break 22091 } 22092 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 22093 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 22094 break 22095 } 22096 if v_0_1_0_0_0_0.AuxInt != 63 { 22097 break 22098 } 22099 if y != v_0_1_0_0_0_0.Args[0] { 22100 break 22101 } 22102 v_1 := v.Args[1] 22103 if v_1.Op != OpAMD64SHLQ { 22104 break 22105 } 22106 _ = v_1.Args[1] 22107 if x != v_1.Args[0] { 22108 break 22109 } 22110 if y != v_1.Args[1] { 22111 break 22112 } 22113 v.reset(OpAMD64ROLQ) 22114 v.AddArg(x) 22115 v.AddArg(y) 22116 return true 22117 } 22118 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y)) 22119 // cond: 22120 // result: (ROLQ x y) 22121 for { 22122 _ = v.Args[1] 22123 v_0 := v.Args[0] 22124 if v_0.Op != OpAMD64ANDQ { 22125 break 22126 } 22127 _ = v_0.Args[1] 22128 v_0_0 := v_0.Args[0] 22129 if v_0_0.Op != OpAMD64SBBQcarrymask { 22130 break 22131 } 22132 v_0_0_0 := v_0_0.Args[0] 22133 if v_0_0_0.Op != OpAMD64CMPQconst { 22134 break 22135 } 22136 if v_0_0_0.AuxInt != 64 { 22137 break 22138 } 22139 v_0_0_0_0 := v_0_0_0.Args[0] 22140 if v_0_0_0_0.Op != OpAMD64NEGQ { 22141 break 22142 } 22143 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 22144 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 22145 break 22146 } 22147 if v_0_0_0_0_0.AuxInt != -64 { 22148 break 22149 } 22150 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 22151 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 22152 break 22153 } 22154 if v_0_0_0_0_0_0.AuxInt != 63 { 22155 break 22156 } 22157 y := v_0_0_0_0_0_0.Args[0] 22158 v_0_1 := v_0.Args[1] 22159 if v_0_1.Op != OpAMD64SHRQ { 22160 break 22161 } 22162 _ = v_0_1.Args[1] 22163 x := v_0_1.Args[0] 22164 v_0_1_1 := v_0_1.Args[1] 22165 if v_0_1_1.Op != OpAMD64NEGQ { 22166 break 22167 } 22168 if y != v_0_1_1.Args[0] { 22169 break 22170 } 22171 v_1 := v.Args[1] 22172 if v_1.Op != OpAMD64SHLQ { 22173 break 22174 } 22175 _ = v_1.Args[1] 22176 if x != v_1.Args[0] { 22177 break 22178 } 22179 if y != v_1.Args[1] { 22180 break 22181 } 22182 v.reset(OpAMD64ROLQ) 22183 v.AddArg(x) 22184 v.AddArg(y) 22185 return true 22186 } 22187 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 22188 // cond: 22189 // result: (ROLQ x y) 22190 for { 22191 _ = v.Args[1] 22192 v_0 := v.Args[0] 22193 if v_0.Op != OpAMD64SHLQ { 22194 break 22195 } 22196 _ = v_0.Args[1] 22197 x := v_0.Args[0] 22198 y := v_0.Args[1] 22199 v_1 := v.Args[1] 22200 if v_1.Op != OpAMD64ANDQ { 22201 break 22202 } 22203 _ = v_1.Args[1] 22204 v_1_0 := v_1.Args[0] 22205 if v_1_0.Op != OpAMD64SHRQ { 22206 break 22207 } 22208 _ = v_1_0.Args[1] 22209 if x != v_1_0.Args[0] { 22210 break 22211 } 22212 v_1_0_1 := v_1_0.Args[1] 22213 if v_1_0_1.Op != OpAMD64NEGL { 22214 break 22215 } 22216 if y != v_1_0_1.Args[0] { 22217 break 22218 } 22219 v_1_1 := v_1.Args[1] 22220 if v_1_1.Op != OpAMD64SBBQcarrymask { 22221 break 22222 } 22223 v_1_1_0 := v_1_1.Args[0] 22224 if v_1_1_0.Op != OpAMD64CMPLconst { 22225 break 22226 } 22227 if v_1_1_0.AuxInt != 64 { 22228 break 22229 } 22230 v_1_1_0_0 := v_1_1_0.Args[0] 22231 if v_1_1_0_0.Op != OpAMD64NEGL { 22232 break 22233 } 22234 
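// This rule and those that follow repeat the rotate patterns with
// NEGL/ADDLconst/ANDLconst and CMPLconst in place of the Q forms: the same
// ROLQ (and, further down, RORQ) is recognized when the count arithmetic
// is done in 32 bits, as when the count variable has a 32-bit type. A
// hedged sketch (name and types are illustrative only):
//
//	func rol64by32(x uint64, c uint32) uint64 {
//		// the 32-bit count computation yields the L-form ops matched here
//		return x<<(c&63) | x>>(64-c&63)
//	}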
v_1_1_0_0_0 := v_1_1_0_0.Args[0] 22235 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 22236 break 22237 } 22238 if v_1_1_0_0_0.AuxInt != -64 { 22239 break 22240 } 22241 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 22242 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 22243 break 22244 } 22245 if v_1_1_0_0_0_0.AuxInt != 63 { 22246 break 22247 } 22248 if y != v_1_1_0_0_0_0.Args[0] { 22249 break 22250 } 22251 v.reset(OpAMD64ROLQ) 22252 v.AddArg(x) 22253 v.AddArg(y) 22254 return true 22255 } 22256 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y)))) 22257 // cond: 22258 // result: (ROLQ x y) 22259 for { 22260 _ = v.Args[1] 22261 v_0 := v.Args[0] 22262 if v_0.Op != OpAMD64SHLQ { 22263 break 22264 } 22265 _ = v_0.Args[1] 22266 x := v_0.Args[0] 22267 y := v_0.Args[1] 22268 v_1 := v.Args[1] 22269 if v_1.Op != OpAMD64ANDQ { 22270 break 22271 } 22272 _ = v_1.Args[1] 22273 v_1_0 := v_1.Args[0] 22274 if v_1_0.Op != OpAMD64SBBQcarrymask { 22275 break 22276 } 22277 v_1_0_0 := v_1_0.Args[0] 22278 if v_1_0_0.Op != OpAMD64CMPLconst { 22279 break 22280 } 22281 if v_1_0_0.AuxInt != 64 { 22282 break 22283 } 22284 v_1_0_0_0 := v_1_0_0.Args[0] 22285 if v_1_0_0_0.Op != OpAMD64NEGL { 22286 break 22287 } 22288 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 22289 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 22290 break 22291 } 22292 if v_1_0_0_0_0.AuxInt != -64 { 22293 break 22294 } 22295 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 22296 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 22297 break 22298 } 22299 if v_1_0_0_0_0_0.AuxInt != 63 { 22300 break 22301 } 22302 if y != v_1_0_0_0_0_0.Args[0] { 22303 break 22304 } 22305 v_1_1 := v_1.Args[1] 22306 if v_1_1.Op != OpAMD64SHRQ { 22307 break 22308 } 22309 _ = v_1_1.Args[1] 22310 if x != v_1_1.Args[0] { 22311 break 22312 } 22313 v_1_1_1 := v_1_1.Args[1] 22314 if v_1_1_1.Op != OpAMD64NEGL { 22315 break 22316 } 22317 if y != v_1_1_1.Args[0] { 22318 break 22319 } 22320 v.reset(OpAMD64ROLQ) 22321 v.AddArg(x) 22322 v.AddArg(y) 22323 return true 22324 } 22325 return false 22326 } 22327 func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { 22328 // match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y)) 22329 // cond: 22330 // result: (ROLQ x y) 22331 for { 22332 _ = v.Args[1] 22333 v_0 := v.Args[0] 22334 if v_0.Op != OpAMD64ANDQ { 22335 break 22336 } 22337 _ = v_0.Args[1] 22338 v_0_0 := v_0.Args[0] 22339 if v_0_0.Op != OpAMD64SHRQ { 22340 break 22341 } 22342 _ = v_0_0.Args[1] 22343 x := v_0_0.Args[0] 22344 v_0_0_1 := v_0_0.Args[1] 22345 if v_0_0_1.Op != OpAMD64NEGL { 22346 break 22347 } 22348 y := v_0_0_1.Args[0] 22349 v_0_1 := v_0.Args[1] 22350 if v_0_1.Op != OpAMD64SBBQcarrymask { 22351 break 22352 } 22353 v_0_1_0 := v_0_1.Args[0] 22354 if v_0_1_0.Op != OpAMD64CMPLconst { 22355 break 22356 } 22357 if v_0_1_0.AuxInt != 64 { 22358 break 22359 } 22360 v_0_1_0_0 := v_0_1_0.Args[0] 22361 if v_0_1_0_0.Op != OpAMD64NEGL { 22362 break 22363 } 22364 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 22365 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 22366 break 22367 } 22368 if v_0_1_0_0_0.AuxInt != -64 { 22369 break 22370 } 22371 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 22372 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 22373 break 22374 } 22375 if v_0_1_0_0_0_0.AuxInt != 63 { 22376 break 22377 } 22378 if y != v_0_1_0_0_0_0.Args[0] { 22379 break 22380 } 22381 v_1 := v.Args[1] 22382 if v_1.Op != OpAMD64SHLQ { 22383 break 22384 } 22385 _ = v_1.Args[1] 22386 if x != v_1.Args[0] { 22387 break 22388 } 22389 if y != 
v_1.Args[1] { 22390 break 22391 } 22392 v.reset(OpAMD64ROLQ) 22393 v.AddArg(x) 22394 v.AddArg(y) 22395 return true 22396 } 22397 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y)) 22398 // cond: 22399 // result: (ROLQ x y) 22400 for { 22401 _ = v.Args[1] 22402 v_0 := v.Args[0] 22403 if v_0.Op != OpAMD64ANDQ { 22404 break 22405 } 22406 _ = v_0.Args[1] 22407 v_0_0 := v_0.Args[0] 22408 if v_0_0.Op != OpAMD64SBBQcarrymask { 22409 break 22410 } 22411 v_0_0_0 := v_0_0.Args[0] 22412 if v_0_0_0.Op != OpAMD64CMPLconst { 22413 break 22414 } 22415 if v_0_0_0.AuxInt != 64 { 22416 break 22417 } 22418 v_0_0_0_0 := v_0_0_0.Args[0] 22419 if v_0_0_0_0.Op != OpAMD64NEGL { 22420 break 22421 } 22422 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 22423 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 22424 break 22425 } 22426 if v_0_0_0_0_0.AuxInt != -64 { 22427 break 22428 } 22429 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 22430 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 22431 break 22432 } 22433 if v_0_0_0_0_0_0.AuxInt != 63 { 22434 break 22435 } 22436 y := v_0_0_0_0_0_0.Args[0] 22437 v_0_1 := v_0.Args[1] 22438 if v_0_1.Op != OpAMD64SHRQ { 22439 break 22440 } 22441 _ = v_0_1.Args[1] 22442 x := v_0_1.Args[0] 22443 v_0_1_1 := v_0_1.Args[1] 22444 if v_0_1_1.Op != OpAMD64NEGL { 22445 break 22446 } 22447 if y != v_0_1_1.Args[0] { 22448 break 22449 } 22450 v_1 := v.Args[1] 22451 if v_1.Op != OpAMD64SHLQ { 22452 break 22453 } 22454 _ = v_1.Args[1] 22455 if x != v_1.Args[0] { 22456 break 22457 } 22458 if y != v_1.Args[1] { 22459 break 22460 } 22461 v.reset(OpAMD64ROLQ) 22462 v.AddArg(x) 22463 v.AddArg(y) 22464 return true 22465 } 22466 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 22467 // cond: 22468 // result: (RORQ x y) 22469 for { 22470 _ = v.Args[1] 22471 v_0 := v.Args[0] 22472 if v_0.Op != OpAMD64SHRQ { 22473 break 22474 } 22475 _ = v_0.Args[1] 22476 x := v_0.Args[0] 22477 y := v_0.Args[1] 22478 v_1 := v.Args[1] 22479 if v_1.Op != OpAMD64ANDQ { 22480 break 22481 } 22482 _ = v_1.Args[1] 22483 v_1_0 := v_1.Args[0] 22484 if v_1_0.Op != OpAMD64SHLQ { 22485 break 22486 } 22487 _ = v_1_0.Args[1] 22488 if x != v_1_0.Args[0] { 22489 break 22490 } 22491 v_1_0_1 := v_1_0.Args[1] 22492 if v_1_0_1.Op != OpAMD64NEGQ { 22493 break 22494 } 22495 if y != v_1_0_1.Args[0] { 22496 break 22497 } 22498 v_1_1 := v_1.Args[1] 22499 if v_1_1.Op != OpAMD64SBBQcarrymask { 22500 break 22501 } 22502 v_1_1_0 := v_1_1.Args[0] 22503 if v_1_1_0.Op != OpAMD64CMPQconst { 22504 break 22505 } 22506 if v_1_1_0.AuxInt != 64 { 22507 break 22508 } 22509 v_1_1_0_0 := v_1_1_0.Args[0] 22510 if v_1_1_0_0.Op != OpAMD64NEGQ { 22511 break 22512 } 22513 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 22514 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 22515 break 22516 } 22517 if v_1_1_0_0_0.AuxInt != -64 { 22518 break 22519 } 22520 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 22521 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 22522 break 22523 } 22524 if v_1_1_0_0_0_0.AuxInt != 63 { 22525 break 22526 } 22527 if y != v_1_1_0_0_0_0.Args[0] { 22528 break 22529 } 22530 v.reset(OpAMD64RORQ) 22531 v.AddArg(x) 22532 v.AddArg(y) 22533 return true 22534 } 22535 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y)))) 22536 // cond: 22537 // result: (RORQ x y) 22538 for { 22539 _ = v.Args[1] 22540 v_0 := v.Args[0] 22541 if v_0.Op != OpAMD64SHRQ { 22542 break 22543 } 22544 _ = v_0.Args[1] 22545 x := 
v_0.Args[0] 22546 y := v_0.Args[1] 22547 v_1 := v.Args[1] 22548 if v_1.Op != OpAMD64ANDQ { 22549 break 22550 } 22551 _ = v_1.Args[1] 22552 v_1_0 := v_1.Args[0] 22553 if v_1_0.Op != OpAMD64SBBQcarrymask { 22554 break 22555 } 22556 v_1_0_0 := v_1_0.Args[0] 22557 if v_1_0_0.Op != OpAMD64CMPQconst { 22558 break 22559 } 22560 if v_1_0_0.AuxInt != 64 { 22561 break 22562 } 22563 v_1_0_0_0 := v_1_0_0.Args[0] 22564 if v_1_0_0_0.Op != OpAMD64NEGQ { 22565 break 22566 } 22567 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 22568 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 22569 break 22570 } 22571 if v_1_0_0_0_0.AuxInt != -64 { 22572 break 22573 } 22574 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 22575 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 22576 break 22577 } 22578 if v_1_0_0_0_0_0.AuxInt != 63 { 22579 break 22580 } 22581 if y != v_1_0_0_0_0_0.Args[0] { 22582 break 22583 } 22584 v_1_1 := v_1.Args[1] 22585 if v_1_1.Op != OpAMD64SHLQ { 22586 break 22587 } 22588 _ = v_1_1.Args[1] 22589 if x != v_1_1.Args[0] { 22590 break 22591 } 22592 v_1_1_1 := v_1_1.Args[1] 22593 if v_1_1_1.Op != OpAMD64NEGQ { 22594 break 22595 } 22596 if y != v_1_1_1.Args[0] { 22597 break 22598 } 22599 v.reset(OpAMD64RORQ) 22600 v.AddArg(x) 22601 v.AddArg(y) 22602 return true 22603 } 22604 // match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y)) 22605 // cond: 22606 // result: (RORQ x y) 22607 for { 22608 _ = v.Args[1] 22609 v_0 := v.Args[0] 22610 if v_0.Op != OpAMD64ANDQ { 22611 break 22612 } 22613 _ = v_0.Args[1] 22614 v_0_0 := v_0.Args[0] 22615 if v_0_0.Op != OpAMD64SHLQ { 22616 break 22617 } 22618 _ = v_0_0.Args[1] 22619 x := v_0_0.Args[0] 22620 v_0_0_1 := v_0_0.Args[1] 22621 if v_0_0_1.Op != OpAMD64NEGQ { 22622 break 22623 } 22624 y := v_0_0_1.Args[0] 22625 v_0_1 := v_0.Args[1] 22626 if v_0_1.Op != OpAMD64SBBQcarrymask { 22627 break 22628 } 22629 v_0_1_0 := v_0_1.Args[0] 22630 if v_0_1_0.Op != OpAMD64CMPQconst { 22631 break 22632 } 22633 if v_0_1_0.AuxInt != 64 { 22634 break 22635 } 22636 v_0_1_0_0 := v_0_1_0.Args[0] 22637 if v_0_1_0_0.Op != OpAMD64NEGQ { 22638 break 22639 } 22640 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 22641 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 22642 break 22643 } 22644 if v_0_1_0_0_0.AuxInt != -64 { 22645 break 22646 } 22647 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 22648 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 22649 break 22650 } 22651 if v_0_1_0_0_0_0.AuxInt != 63 { 22652 break 22653 } 22654 if y != v_0_1_0_0_0_0.Args[0] { 22655 break 22656 } 22657 v_1 := v.Args[1] 22658 if v_1.Op != OpAMD64SHRQ { 22659 break 22660 } 22661 _ = v_1.Args[1] 22662 if x != v_1.Args[0] { 22663 break 22664 } 22665 if y != v_1.Args[1] { 22666 break 22667 } 22668 v.reset(OpAMD64RORQ) 22669 v.AddArg(x) 22670 v.AddArg(y) 22671 return true 22672 } 22673 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y)) 22674 // cond: 22675 // result: (RORQ x y) 22676 for { 22677 _ = v.Args[1] 22678 v_0 := v.Args[0] 22679 if v_0.Op != OpAMD64ANDQ { 22680 break 22681 } 22682 _ = v_0.Args[1] 22683 v_0_0 := v_0.Args[0] 22684 if v_0_0.Op != OpAMD64SBBQcarrymask { 22685 break 22686 } 22687 v_0_0_0 := v_0_0.Args[0] 22688 if v_0_0_0.Op != OpAMD64CMPQconst { 22689 break 22690 } 22691 if v_0_0_0.AuxInt != 64 { 22692 break 22693 } 22694 v_0_0_0_0 := v_0_0_0.Args[0] 22695 if v_0_0_0_0.Op != OpAMD64NEGQ { 22696 break 22697 } 22698 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 22699 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 22700 break 22701 } 22702 if 
v_0_0_0_0_0.AuxInt != -64 { 22703 break 22704 } 22705 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 22706 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 22707 break 22708 } 22709 if v_0_0_0_0_0_0.AuxInt != 63 { 22710 break 22711 } 22712 y := v_0_0_0_0_0_0.Args[0] 22713 v_0_1 := v_0.Args[1] 22714 if v_0_1.Op != OpAMD64SHLQ { 22715 break 22716 } 22717 _ = v_0_1.Args[1] 22718 x := v_0_1.Args[0] 22719 v_0_1_1 := v_0_1.Args[1] 22720 if v_0_1_1.Op != OpAMD64NEGQ { 22721 break 22722 } 22723 if y != v_0_1_1.Args[0] { 22724 break 22725 } 22726 v_1 := v.Args[1] 22727 if v_1.Op != OpAMD64SHRQ { 22728 break 22729 } 22730 _ = v_1.Args[1] 22731 if x != v_1.Args[0] { 22732 break 22733 } 22734 if y != v_1.Args[1] { 22735 break 22736 } 22737 v.reset(OpAMD64RORQ) 22738 v.AddArg(x) 22739 v.AddArg(y) 22740 return true 22741 } 22742 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 22743 // cond: 22744 // result: (RORQ x y) 22745 for { 22746 _ = v.Args[1] 22747 v_0 := v.Args[0] 22748 if v_0.Op != OpAMD64SHRQ { 22749 break 22750 } 22751 _ = v_0.Args[1] 22752 x := v_0.Args[0] 22753 y := v_0.Args[1] 22754 v_1 := v.Args[1] 22755 if v_1.Op != OpAMD64ANDQ { 22756 break 22757 } 22758 _ = v_1.Args[1] 22759 v_1_0 := v_1.Args[0] 22760 if v_1_0.Op != OpAMD64SHLQ { 22761 break 22762 } 22763 _ = v_1_0.Args[1] 22764 if x != v_1_0.Args[0] { 22765 break 22766 } 22767 v_1_0_1 := v_1_0.Args[1] 22768 if v_1_0_1.Op != OpAMD64NEGL { 22769 break 22770 } 22771 if y != v_1_0_1.Args[0] { 22772 break 22773 } 22774 v_1_1 := v_1.Args[1] 22775 if v_1_1.Op != OpAMD64SBBQcarrymask { 22776 break 22777 } 22778 v_1_1_0 := v_1_1.Args[0] 22779 if v_1_1_0.Op != OpAMD64CMPLconst { 22780 break 22781 } 22782 if v_1_1_0.AuxInt != 64 { 22783 break 22784 } 22785 v_1_1_0_0 := v_1_1_0.Args[0] 22786 if v_1_1_0_0.Op != OpAMD64NEGL { 22787 break 22788 } 22789 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 22790 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 22791 break 22792 } 22793 if v_1_1_0_0_0.AuxInt != -64 { 22794 break 22795 } 22796 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 22797 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 22798 break 22799 } 22800 if v_1_1_0_0_0_0.AuxInt != 63 { 22801 break 22802 } 22803 if y != v_1_1_0_0_0_0.Args[0] { 22804 break 22805 } 22806 v.reset(OpAMD64RORQ) 22807 v.AddArg(x) 22808 v.AddArg(y) 22809 return true 22810 } 22811 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y)))) 22812 // cond: 22813 // result: (RORQ x y) 22814 for { 22815 _ = v.Args[1] 22816 v_0 := v.Args[0] 22817 if v_0.Op != OpAMD64SHRQ { 22818 break 22819 } 22820 _ = v_0.Args[1] 22821 x := v_0.Args[0] 22822 y := v_0.Args[1] 22823 v_1 := v.Args[1] 22824 if v_1.Op != OpAMD64ANDQ { 22825 break 22826 } 22827 _ = v_1.Args[1] 22828 v_1_0 := v_1.Args[0] 22829 if v_1_0.Op != OpAMD64SBBQcarrymask { 22830 break 22831 } 22832 v_1_0_0 := v_1_0.Args[0] 22833 if v_1_0_0.Op != OpAMD64CMPLconst { 22834 break 22835 } 22836 if v_1_0_0.AuxInt != 64 { 22837 break 22838 } 22839 v_1_0_0_0 := v_1_0_0.Args[0] 22840 if v_1_0_0_0.Op != OpAMD64NEGL { 22841 break 22842 } 22843 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 22844 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 22845 break 22846 } 22847 if v_1_0_0_0_0.AuxInt != -64 { 22848 break 22849 } 22850 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 22851 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 22852 break 22853 } 22854 if v_1_0_0_0_0_0.AuxInt != 63 { 22855 break 22856 } 22857 if y != v_1_0_0_0_0_0.Args[0] { 22858 break 22859 } 22860 v_1_1 := 
v_1.Args[1] 22861 if v_1_1.Op != OpAMD64SHLQ { 22862 break 22863 } 22864 _ = v_1_1.Args[1] 22865 if x != v_1_1.Args[0] { 22866 break 22867 } 22868 v_1_1_1 := v_1_1.Args[1] 22869 if v_1_1_1.Op != OpAMD64NEGL { 22870 break 22871 } 22872 if y != v_1_1_1.Args[0] { 22873 break 22874 } 22875 v.reset(OpAMD64RORQ) 22876 v.AddArg(x) 22877 v.AddArg(y) 22878 return true 22879 } 22880 // match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y)) 22881 // cond: 22882 // result: (RORQ x y) 22883 for { 22884 _ = v.Args[1] 22885 v_0 := v.Args[0] 22886 if v_0.Op != OpAMD64ANDQ { 22887 break 22888 } 22889 _ = v_0.Args[1] 22890 v_0_0 := v_0.Args[0] 22891 if v_0_0.Op != OpAMD64SHLQ { 22892 break 22893 } 22894 _ = v_0_0.Args[1] 22895 x := v_0_0.Args[0] 22896 v_0_0_1 := v_0_0.Args[1] 22897 if v_0_0_1.Op != OpAMD64NEGL { 22898 break 22899 } 22900 y := v_0_0_1.Args[0] 22901 v_0_1 := v_0.Args[1] 22902 if v_0_1.Op != OpAMD64SBBQcarrymask { 22903 break 22904 } 22905 v_0_1_0 := v_0_1.Args[0] 22906 if v_0_1_0.Op != OpAMD64CMPLconst { 22907 break 22908 } 22909 if v_0_1_0.AuxInt != 64 { 22910 break 22911 } 22912 v_0_1_0_0 := v_0_1_0.Args[0] 22913 if v_0_1_0_0.Op != OpAMD64NEGL { 22914 break 22915 } 22916 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 22917 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 22918 break 22919 } 22920 if v_0_1_0_0_0.AuxInt != -64 { 22921 break 22922 } 22923 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 22924 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 22925 break 22926 } 22927 if v_0_1_0_0_0_0.AuxInt != 63 { 22928 break 22929 } 22930 if y != v_0_1_0_0_0_0.Args[0] { 22931 break 22932 } 22933 v_1 := v.Args[1] 22934 if v_1.Op != OpAMD64SHRQ { 22935 break 22936 } 22937 _ = v_1.Args[1] 22938 if x != v_1.Args[0] { 22939 break 22940 } 22941 if y != v_1.Args[1] { 22942 break 22943 } 22944 v.reset(OpAMD64RORQ) 22945 v.AddArg(x) 22946 v.AddArg(y) 22947 return true 22948 } 22949 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y)) 22950 // cond: 22951 // result: (RORQ x y) 22952 for { 22953 _ = v.Args[1] 22954 v_0 := v.Args[0] 22955 if v_0.Op != OpAMD64ANDQ { 22956 break 22957 } 22958 _ = v_0.Args[1] 22959 v_0_0 := v_0.Args[0] 22960 if v_0_0.Op != OpAMD64SBBQcarrymask { 22961 break 22962 } 22963 v_0_0_0 := v_0_0.Args[0] 22964 if v_0_0_0.Op != OpAMD64CMPLconst { 22965 break 22966 } 22967 if v_0_0_0.AuxInt != 64 { 22968 break 22969 } 22970 v_0_0_0_0 := v_0_0_0.Args[0] 22971 if v_0_0_0_0.Op != OpAMD64NEGL { 22972 break 22973 } 22974 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 22975 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 22976 break 22977 } 22978 if v_0_0_0_0_0.AuxInt != -64 { 22979 break 22980 } 22981 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 22982 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 22983 break 22984 } 22985 if v_0_0_0_0_0_0.AuxInt != 63 { 22986 break 22987 } 22988 y := v_0_0_0_0_0_0.Args[0] 22989 v_0_1 := v_0.Args[1] 22990 if v_0_1.Op != OpAMD64SHLQ { 22991 break 22992 } 22993 _ = v_0_1.Args[1] 22994 x := v_0_1.Args[0] 22995 v_0_1_1 := v_0_1.Args[1] 22996 if v_0_1_1.Op != OpAMD64NEGL { 22997 break 22998 } 22999 if y != v_0_1_1.Args[0] { 23000 break 23001 } 23002 v_1 := v.Args[1] 23003 if v_1.Op != OpAMD64SHRQ { 23004 break 23005 } 23006 _ = v_1.Args[1] 23007 if x != v_1.Args[0] { 23008 break 23009 } 23010 if y != v_1.Args[1] { 23011 break 23012 } 23013 v.reset(OpAMD64RORQ) 23014 v.AddArg(x) 23015 v.AddArg(y) 23016 return true 23017 } 23018 return false 23019 } 23020 func rewriteValueAMD64_OpAMD64ORQ_20(v 
*Value) bool { 23021 b := v.Block 23022 _ = b 23023 typ := &b.Func.Config.Types 23024 _ = typ 23025 // match: (ORQ x x) 23026 // cond: 23027 // result: x 23028 for { 23029 _ = v.Args[1] 23030 x := v.Args[0] 23031 if x != v.Args[1] { 23032 break 23033 } 23034 v.reset(OpCopy) 23035 v.Type = x.Type 23036 v.AddArg(x) 23037 return true 23038 } 23039 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) 23040 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23041 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 23042 for { 23043 _ = v.Args[1] 23044 x0 := v.Args[0] 23045 if x0.Op != OpAMD64MOVBload { 23046 break 23047 } 23048 i0 := x0.AuxInt 23049 s := x0.Aux 23050 _ = x0.Args[1] 23051 p := x0.Args[0] 23052 mem := x0.Args[1] 23053 sh := v.Args[1] 23054 if sh.Op != OpAMD64SHLQconst { 23055 break 23056 } 23057 if sh.AuxInt != 8 { 23058 break 23059 } 23060 x1 := sh.Args[0] 23061 if x1.Op != OpAMD64MOVBload { 23062 break 23063 } 23064 i1 := x1.AuxInt 23065 if x1.Aux != s { 23066 break 23067 } 23068 _ = x1.Args[1] 23069 if p != x1.Args[0] { 23070 break 23071 } 23072 if mem != x1.Args[1] { 23073 break 23074 } 23075 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23076 break 23077 } 23078 b = mergePoint(b, x0, x1) 23079 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 23080 v.reset(OpCopy) 23081 v.AddArg(v0) 23082 v0.AuxInt = i0 23083 v0.Aux = s 23084 v0.AddArg(p) 23085 v0.AddArg(mem) 23086 return true 23087 } 23088 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 23089 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23090 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 23091 for { 23092 _ = v.Args[1] 23093 sh := v.Args[0] 23094 if sh.Op != OpAMD64SHLQconst { 23095 break 23096 } 23097 if sh.AuxInt != 8 { 23098 break 23099 } 23100 x1 := sh.Args[0] 23101 if x1.Op != OpAMD64MOVBload { 23102 break 23103 } 23104 i1 := x1.AuxInt 23105 s := x1.Aux 23106 _ = x1.Args[1] 23107 p := x1.Args[0] 23108 mem := x1.Args[1] 23109 x0 := v.Args[1] 23110 if x0.Op != OpAMD64MOVBload { 23111 break 23112 } 23113 i0 := x0.AuxInt 23114 if x0.Aux != s { 23115 break 23116 } 23117 _ = x0.Args[1] 23118 if p != x0.Args[0] { 23119 break 23120 } 23121 if mem != x0.Args[1] { 23122 break 23123 } 23124 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23125 break 23126 } 23127 b = mergePoint(b, x0, x1) 23128 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 23129 v.reset(OpCopy) 23130 v.AddArg(v0) 23131 v0.AuxInt = i0 23132 v0.Aux = s 23133 v0.AddArg(p) 23134 v0.AddArg(mem) 23135 return true 23136 } 23137 // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) 23138 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23139 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 23140 for { 23141 _ = v.Args[1] 23142 x0 := v.Args[0] 23143 if x0.Op != OpAMD64MOVWload { 23144 break 23145 } 23146 i0 := x0.AuxInt 23147 s := x0.Aux 23148 _ = x0.Args[1] 23149 p := x0.Args[0] 23150 mem := x0.Args[1] 23151 sh := v.Args[1] 23152 if sh.Op != 
OpAMD64SHLQconst { 23153 break 23154 } 23155 if sh.AuxInt != 16 { 23156 break 23157 } 23158 x1 := sh.Args[0] 23159 if x1.Op != OpAMD64MOVWload { 23160 break 23161 } 23162 i1 := x1.AuxInt 23163 if x1.Aux != s { 23164 break 23165 } 23166 _ = x1.Args[1] 23167 if p != x1.Args[0] { 23168 break 23169 } 23170 if mem != x1.Args[1] { 23171 break 23172 } 23173 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23174 break 23175 } 23176 b = mergePoint(b, x0, x1) 23177 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 23178 v.reset(OpCopy) 23179 v.AddArg(v0) 23180 v0.AuxInt = i0 23181 v0.Aux = s 23182 v0.AddArg(p) 23183 v0.AddArg(mem) 23184 return true 23185 } 23186 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 23187 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23188 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 23189 for { 23190 _ = v.Args[1] 23191 sh := v.Args[0] 23192 if sh.Op != OpAMD64SHLQconst { 23193 break 23194 } 23195 if sh.AuxInt != 16 { 23196 break 23197 } 23198 x1 := sh.Args[0] 23199 if x1.Op != OpAMD64MOVWload { 23200 break 23201 } 23202 i1 := x1.AuxInt 23203 s := x1.Aux 23204 _ = x1.Args[1] 23205 p := x1.Args[0] 23206 mem := x1.Args[1] 23207 x0 := v.Args[1] 23208 if x0.Op != OpAMD64MOVWload { 23209 break 23210 } 23211 i0 := x0.AuxInt 23212 if x0.Aux != s { 23213 break 23214 } 23215 _ = x0.Args[1] 23216 if p != x0.Args[0] { 23217 break 23218 } 23219 if mem != x0.Args[1] { 23220 break 23221 } 23222 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23223 break 23224 } 23225 b = mergePoint(b, x0, x1) 23226 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 23227 v.reset(OpCopy) 23228 v.AddArg(v0) 23229 v0.AuxInt = i0 23230 v0.Aux = s 23231 v0.AddArg(p) 23232 v0.AddArg(mem) 23233 return true 23234 } 23235 // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) 23236 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23237 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 23238 for { 23239 _ = v.Args[1] 23240 x0 := v.Args[0] 23241 if x0.Op != OpAMD64MOVLload { 23242 break 23243 } 23244 i0 := x0.AuxInt 23245 s := x0.Aux 23246 _ = x0.Args[1] 23247 p := x0.Args[0] 23248 mem := x0.Args[1] 23249 sh := v.Args[1] 23250 if sh.Op != OpAMD64SHLQconst { 23251 break 23252 } 23253 if sh.AuxInt != 32 { 23254 break 23255 } 23256 x1 := sh.Args[0] 23257 if x1.Op != OpAMD64MOVLload { 23258 break 23259 } 23260 i1 := x1.AuxInt 23261 if x1.Aux != s { 23262 break 23263 } 23264 _ = x1.Args[1] 23265 if p != x1.Args[0] { 23266 break 23267 } 23268 if mem != x1.Args[1] { 23269 break 23270 } 23271 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23272 break 23273 } 23274 b = mergePoint(b, x0, x1) 23275 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 23276 v.reset(OpCopy) 23277 v.AddArg(v0) 23278 v0.AuxInt = i0 23279 v0.Aux = s 23280 v0.AddArg(p) 23281 v0.AddArg(mem) 23282 return true 23283 } 23284 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem)) 23285 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses 
== 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23286 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 23287 for { 23288 _ = v.Args[1] 23289 sh := v.Args[0] 23290 if sh.Op != OpAMD64SHLQconst { 23291 break 23292 } 23293 if sh.AuxInt != 32 { 23294 break 23295 } 23296 x1 := sh.Args[0] 23297 if x1.Op != OpAMD64MOVLload { 23298 break 23299 } 23300 i1 := x1.AuxInt 23301 s := x1.Aux 23302 _ = x1.Args[1] 23303 p := x1.Args[0] 23304 mem := x1.Args[1] 23305 x0 := v.Args[1] 23306 if x0.Op != OpAMD64MOVLload { 23307 break 23308 } 23309 i0 := x0.AuxInt 23310 if x0.Aux != s { 23311 break 23312 } 23313 _ = x0.Args[1] 23314 if p != x0.Args[0] { 23315 break 23316 } 23317 if mem != x0.Args[1] { 23318 break 23319 } 23320 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23321 break 23322 } 23323 b = mergePoint(b, x0, x1) 23324 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 23325 v.reset(OpCopy) 23326 v.AddArg(v0) 23327 v0.AuxInt = i0 23328 v0.Aux = s 23329 v0.AddArg(p) 23330 v0.AddArg(mem) 23331 return true 23332 } 23333 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 23334 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23335 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 23336 for { 23337 _ = v.Args[1] 23338 s1 := v.Args[0] 23339 if s1.Op != OpAMD64SHLQconst { 23340 break 23341 } 23342 j1 := s1.AuxInt 23343 x1 := s1.Args[0] 23344 if x1.Op != OpAMD64MOVBload { 23345 break 23346 } 23347 i1 := x1.AuxInt 23348 s := x1.Aux 23349 _ = x1.Args[1] 23350 p := x1.Args[0] 23351 mem := x1.Args[1] 23352 or := v.Args[1] 23353 if or.Op != OpAMD64ORQ { 23354 break 23355 } 23356 _ = or.Args[1] 23357 s0 := or.Args[0] 23358 if s0.Op != OpAMD64SHLQconst { 23359 break 23360 } 23361 j0 := s0.AuxInt 23362 x0 := s0.Args[0] 23363 if x0.Op != OpAMD64MOVBload { 23364 break 23365 } 23366 i0 := x0.AuxInt 23367 if x0.Aux != s { 23368 break 23369 } 23370 _ = x0.Args[1] 23371 if p != x0.Args[0] { 23372 break 23373 } 23374 if mem != x0.Args[1] { 23375 break 23376 } 23377 y := or.Args[1] 23378 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23379 break 23380 } 23381 b = mergePoint(b, x0, x1) 23382 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23383 v.reset(OpCopy) 23384 v.AddArg(v0) 23385 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23386 v1.AuxInt = j0 23387 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 23388 v2.AuxInt = i0 23389 v2.Aux = s 23390 v2.AddArg(p) 23391 v2.AddArg(mem) 23392 v1.AddArg(v2) 23393 v0.AddArg(v1) 23394 v0.AddArg(y) 23395 return true 23396 } 23397 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))) 23398 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23399 // result: @mergePoint(b,x0,x1) (ORQ 
<v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 23400 for { 23401 _ = v.Args[1] 23402 s1 := v.Args[0] 23403 if s1.Op != OpAMD64SHLQconst { 23404 break 23405 } 23406 j1 := s1.AuxInt 23407 x1 := s1.Args[0] 23408 if x1.Op != OpAMD64MOVBload { 23409 break 23410 } 23411 i1 := x1.AuxInt 23412 s := x1.Aux 23413 _ = x1.Args[1] 23414 p := x1.Args[0] 23415 mem := x1.Args[1] 23416 or := v.Args[1] 23417 if or.Op != OpAMD64ORQ { 23418 break 23419 } 23420 _ = or.Args[1] 23421 y := or.Args[0] 23422 s0 := or.Args[1] 23423 if s0.Op != OpAMD64SHLQconst { 23424 break 23425 } 23426 j0 := s0.AuxInt 23427 x0 := s0.Args[0] 23428 if x0.Op != OpAMD64MOVBload { 23429 break 23430 } 23431 i0 := x0.AuxInt 23432 if x0.Aux != s { 23433 break 23434 } 23435 _ = x0.Args[1] 23436 if p != x0.Args[0] { 23437 break 23438 } 23439 if mem != x0.Args[1] { 23440 break 23441 } 23442 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23443 break 23444 } 23445 b = mergePoint(b, x0, x1) 23446 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23447 v.reset(OpCopy) 23448 v.AddArg(v0) 23449 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23450 v1.AuxInt = j0 23451 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 23452 v2.AuxInt = i0 23453 v2.Aux = s 23454 v2.AddArg(p) 23455 v2.AddArg(mem) 23456 v1.AddArg(v2) 23457 v0.AddArg(v1) 23458 v0.AddArg(y) 23459 return true 23460 } 23461 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 23462 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23463 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 23464 for { 23465 _ = v.Args[1] 23466 or := v.Args[0] 23467 if or.Op != OpAMD64ORQ { 23468 break 23469 } 23470 _ = or.Args[1] 23471 s0 := or.Args[0] 23472 if s0.Op != OpAMD64SHLQconst { 23473 break 23474 } 23475 j0 := s0.AuxInt 23476 x0 := s0.Args[0] 23477 if x0.Op != OpAMD64MOVBload { 23478 break 23479 } 23480 i0 := x0.AuxInt 23481 s := x0.Aux 23482 _ = x0.Args[1] 23483 p := x0.Args[0] 23484 mem := x0.Args[1] 23485 y := or.Args[1] 23486 s1 := v.Args[1] 23487 if s1.Op != OpAMD64SHLQconst { 23488 break 23489 } 23490 j1 := s1.AuxInt 23491 x1 := s1.Args[0] 23492 if x1.Op != OpAMD64MOVBload { 23493 break 23494 } 23495 i1 := x1.AuxInt 23496 if x1.Aux != s { 23497 break 23498 } 23499 _ = x1.Args[1] 23500 if p != x1.Args[0] { 23501 break 23502 } 23503 if mem != x1.Args[1] { 23504 break 23505 } 23506 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23507 break 23508 } 23509 b = mergePoint(b, x0, x1) 23510 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23511 v.reset(OpCopy) 23512 v.AddArg(v0) 23513 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23514 v1.AuxInt = j0 23515 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 23516 v2.AuxInt = i0 23517 v2.Aux = s 23518 v2.AddArg(p) 23519 v2.AddArg(mem) 23520 v1.AddArg(v2) 23521 v0.AddArg(v1) 23522 v0.AddArg(y) 23523 return true 23524 } 23525 return false 23526 } 23527 func 
rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool { 23528 b := v.Block 23529 _ = b 23530 typ := &b.Func.Config.Types 23531 _ = typ 23532 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 23533 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23534 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 23535 for { 23536 _ = v.Args[1] 23537 or := v.Args[0] 23538 if or.Op != OpAMD64ORQ { 23539 break 23540 } 23541 _ = or.Args[1] 23542 y := or.Args[0] 23543 s0 := or.Args[1] 23544 if s0.Op != OpAMD64SHLQconst { 23545 break 23546 } 23547 j0 := s0.AuxInt 23548 x0 := s0.Args[0] 23549 if x0.Op != OpAMD64MOVBload { 23550 break 23551 } 23552 i0 := x0.AuxInt 23553 s := x0.Aux 23554 _ = x0.Args[1] 23555 p := x0.Args[0] 23556 mem := x0.Args[1] 23557 s1 := v.Args[1] 23558 if s1.Op != OpAMD64SHLQconst { 23559 break 23560 } 23561 j1 := s1.AuxInt 23562 x1 := s1.Args[0] 23563 if x1.Op != OpAMD64MOVBload { 23564 break 23565 } 23566 i1 := x1.AuxInt 23567 if x1.Aux != s { 23568 break 23569 } 23570 _ = x1.Args[1] 23571 if p != x1.Args[0] { 23572 break 23573 } 23574 if mem != x1.Args[1] { 23575 break 23576 } 23577 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23578 break 23579 } 23580 b = mergePoint(b, x0, x1) 23581 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23582 v.reset(OpCopy) 23583 v.AddArg(v0) 23584 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23585 v1.AuxInt = j0 23586 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 23587 v2.AuxInt = i0 23588 v2.Aux = s 23589 v2.AddArg(p) 23590 v2.AddArg(mem) 23591 v1.AddArg(v2) 23592 v0.AddArg(v1) 23593 v0.AddArg(y) 23594 return true 23595 } 23596 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y)) 23597 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23598 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 23599 for { 23600 _ = v.Args[1] 23601 s1 := v.Args[0] 23602 if s1.Op != OpAMD64SHLQconst { 23603 break 23604 } 23605 j1 := s1.AuxInt 23606 x1 := s1.Args[0] 23607 if x1.Op != OpAMD64MOVWload { 23608 break 23609 } 23610 i1 := x1.AuxInt 23611 s := x1.Aux 23612 _ = x1.Args[1] 23613 p := x1.Args[0] 23614 mem := x1.Args[1] 23615 or := v.Args[1] 23616 if or.Op != OpAMD64ORQ { 23617 break 23618 } 23619 _ = or.Args[1] 23620 s0 := or.Args[0] 23621 if s0.Op != OpAMD64SHLQconst { 23622 break 23623 } 23624 j0 := s0.AuxInt 23625 x0 := s0.Args[0] 23626 if x0.Op != OpAMD64MOVWload { 23627 break 23628 } 23629 i0 := x0.AuxInt 23630 if x0.Aux != s { 23631 break 23632 } 23633 _ = x0.Args[1] 23634 if p != x0.Args[0] { 23635 break 23636 } 23637 if mem != x0.Args[1] { 23638 break 23639 } 23640 y := or.Args[1] 23641 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23642 break 23643 } 23644 b = mergePoint(b, x0, x1) 23645 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23646 v.reset(OpCopy) 23647 v.AddArg(v0) 23648 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23649 v1.AuxInt = j0 23650 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 23651 v2.AuxInt = i0 23652 v2.Aux = s 23653 v2.AddArg(p) 23654 v2.AddArg(mem) 23655 v1.AddArg(v2) 23656 v0.AddArg(v1) 23657 v0.AddArg(y) 23658 return true 23659 } 23660 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)))) 23661 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23662 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 23663 for { 23664 _ = v.Args[1] 23665 s1 := v.Args[0] 23666 if s1.Op != OpAMD64SHLQconst { 23667 break 23668 } 23669 j1 := s1.AuxInt 23670 x1 := s1.Args[0] 23671 if x1.Op != OpAMD64MOVWload { 23672 break 23673 } 23674 i1 := x1.AuxInt 23675 s := x1.Aux 23676 _ = x1.Args[1] 23677 p := x1.Args[0] 23678 mem := x1.Args[1] 23679 or := v.Args[1] 23680 if or.Op != OpAMD64ORQ { 23681 break 23682 } 23683 _ = or.Args[1] 23684 y := or.Args[0] 23685 s0 := or.Args[1] 23686 if s0.Op != OpAMD64SHLQconst { 23687 break 23688 } 23689 j0 := s0.AuxInt 23690 x0 := s0.Args[0] 23691 if x0.Op != OpAMD64MOVWload { 23692 break 23693 } 23694 i0 := x0.AuxInt 23695 if x0.Aux != s { 23696 break 23697 } 23698 _ = x0.Args[1] 23699 if p != x0.Args[0] { 23700 break 23701 } 23702 if mem != x0.Args[1] { 23703 break 23704 } 23705 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23706 break 23707 } 23708 b = mergePoint(b, x0, x1) 23709 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23710 v.reset(OpCopy) 23711 v.AddArg(v0) 23712 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23713 v1.AuxInt = j0 23714 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 23715 v2.AuxInt = i0 23716 v2.Aux = s 23717 v2.AddArg(p) 23718 v2.AddArg(mem) 23719 v1.AddArg(v2) 23720 v0.AddArg(v1) 23721 v0.AddArg(y) 23722 return true 23723 } 23724 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 23725 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23726 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 23727 for { 23728 _ = v.Args[1] 23729 or := v.Args[0] 23730 if or.Op != OpAMD64ORQ { 23731 break 23732 } 23733 _ = or.Args[1] 23734 s0 := or.Args[0] 23735 if s0.Op != OpAMD64SHLQconst { 23736 break 23737 } 23738 j0 := s0.AuxInt 23739 x0 := s0.Args[0] 23740 if x0.Op != OpAMD64MOVWload { 23741 break 23742 } 23743 i0 := x0.AuxInt 23744 s := x0.Aux 23745 _ = x0.Args[1] 23746 p := x0.Args[0] 23747 mem := x0.Args[1] 23748 y := or.Args[1] 23749 s1 := v.Args[1] 23750 if s1.Op != OpAMD64SHLQconst { 23751 break 23752 } 23753 j1 := s1.AuxInt 23754 x1 := s1.Args[0] 23755 if x1.Op != OpAMD64MOVWload { 23756 
break 23757 } 23758 i1 := x1.AuxInt 23759 if x1.Aux != s { 23760 break 23761 } 23762 _ = x1.Args[1] 23763 if p != x1.Args[0] { 23764 break 23765 } 23766 if mem != x1.Args[1] { 23767 break 23768 } 23769 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23770 break 23771 } 23772 b = mergePoint(b, x0, x1) 23773 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23774 v.reset(OpCopy) 23775 v.AddArg(v0) 23776 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23777 v1.AuxInt = j0 23778 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 23779 v2.AuxInt = i0 23780 v2.Aux = s 23781 v2.AddArg(p) 23782 v2.AddArg(mem) 23783 v1.AddArg(v2) 23784 v0.AddArg(v1) 23785 v0.AddArg(y) 23786 return true 23787 } 23788 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 23789 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23790 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 23791 for { 23792 _ = v.Args[1] 23793 or := v.Args[0] 23794 if or.Op != OpAMD64ORQ { 23795 break 23796 } 23797 _ = or.Args[1] 23798 y := or.Args[0] 23799 s0 := or.Args[1] 23800 if s0.Op != OpAMD64SHLQconst { 23801 break 23802 } 23803 j0 := s0.AuxInt 23804 x0 := s0.Args[0] 23805 if x0.Op != OpAMD64MOVWload { 23806 break 23807 } 23808 i0 := x0.AuxInt 23809 s := x0.Aux 23810 _ = x0.Args[1] 23811 p := x0.Args[0] 23812 mem := x0.Args[1] 23813 s1 := v.Args[1] 23814 if s1.Op != OpAMD64SHLQconst { 23815 break 23816 } 23817 j1 := s1.AuxInt 23818 x1 := s1.Args[0] 23819 if x1.Op != OpAMD64MOVWload { 23820 break 23821 } 23822 i1 := x1.AuxInt 23823 if x1.Aux != s { 23824 break 23825 } 23826 _ = x1.Args[1] 23827 if p != x1.Args[0] { 23828 break 23829 } 23830 if mem != x1.Args[1] { 23831 break 23832 } 23833 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23834 break 23835 } 23836 b = mergePoint(b, x0, x1) 23837 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23838 v.reset(OpCopy) 23839 v.AddArg(v0) 23840 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23841 v1.AuxInt = j0 23842 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 23843 v2.AuxInt = i0 23844 v2.Aux = s 23845 v2.AddArg(p) 23846 v2.AddArg(mem) 23847 v1.AddArg(v2) 23848 v0.AddArg(v1) 23849 v0.AddArg(y) 23850 return true 23851 } 23852 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 23853 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23854 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 23855 for { 23856 _ = v.Args[1] 23857 x0 := v.Args[0] 23858 if x0.Op != OpAMD64MOVBloadidx1 { 23859 break 23860 } 23861 i0 := x0.AuxInt 23862 s := x0.Aux 23863 _ = x0.Args[2] 23864 p := x0.Args[0] 23865 idx := x0.Args[1] 23866 mem := x0.Args[2] 23867 sh := v.Args[1] 23868 if sh.Op != OpAMD64SHLQconst { 23869 break 23870 } 23871 if sh.AuxInt != 8 { 23872 break 
23873 } 23874 x1 := sh.Args[0] 23875 if x1.Op != OpAMD64MOVBloadidx1 { 23876 break 23877 } 23878 i1 := x1.AuxInt 23879 if x1.Aux != s { 23880 break 23881 } 23882 _ = x1.Args[2] 23883 if p != x1.Args[0] { 23884 break 23885 } 23886 if idx != x1.Args[1] { 23887 break 23888 } 23889 if mem != x1.Args[2] { 23890 break 23891 } 23892 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23893 break 23894 } 23895 b = mergePoint(b, x0, x1) 23896 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 23897 v.reset(OpCopy) 23898 v.AddArg(v0) 23899 v0.AuxInt = i0 23900 v0.Aux = s 23901 v0.AddArg(p) 23902 v0.AddArg(idx) 23903 v0.AddArg(mem) 23904 return true 23905 } 23906 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 23907 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23908 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 23909 for { 23910 _ = v.Args[1] 23911 x0 := v.Args[0] 23912 if x0.Op != OpAMD64MOVBloadidx1 { 23913 break 23914 } 23915 i0 := x0.AuxInt 23916 s := x0.Aux 23917 _ = x0.Args[2] 23918 idx := x0.Args[0] 23919 p := x0.Args[1] 23920 mem := x0.Args[2] 23921 sh := v.Args[1] 23922 if sh.Op != OpAMD64SHLQconst { 23923 break 23924 } 23925 if sh.AuxInt != 8 { 23926 break 23927 } 23928 x1 := sh.Args[0] 23929 if x1.Op != OpAMD64MOVBloadidx1 { 23930 break 23931 } 23932 i1 := x1.AuxInt 23933 if x1.Aux != s { 23934 break 23935 } 23936 _ = x1.Args[2] 23937 if p != x1.Args[0] { 23938 break 23939 } 23940 if idx != x1.Args[1] { 23941 break 23942 } 23943 if mem != x1.Args[2] { 23944 break 23945 } 23946 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23947 break 23948 } 23949 b = mergePoint(b, x0, x1) 23950 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 23951 v.reset(OpCopy) 23952 v.AddArg(v0) 23953 v0.AuxInt = i0 23954 v0.Aux = s 23955 v0.AddArg(p) 23956 v0.AddArg(idx) 23957 v0.AddArg(mem) 23958 return true 23959 } 23960 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 23961 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 23962 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 23963 for { 23964 _ = v.Args[1] 23965 x0 := v.Args[0] 23966 if x0.Op != OpAMD64MOVBloadidx1 { 23967 break 23968 } 23969 i0 := x0.AuxInt 23970 s := x0.Aux 23971 _ = x0.Args[2] 23972 p := x0.Args[0] 23973 idx := x0.Args[1] 23974 mem := x0.Args[2] 23975 sh := v.Args[1] 23976 if sh.Op != OpAMD64SHLQconst { 23977 break 23978 } 23979 if sh.AuxInt != 8 { 23980 break 23981 } 23982 x1 := sh.Args[0] 23983 if x1.Op != OpAMD64MOVBloadidx1 { 23984 break 23985 } 23986 i1 := x1.AuxInt 23987 if x1.Aux != s { 23988 break 23989 } 23990 _ = x1.Args[2] 23991 if idx != x1.Args[0] { 23992 break 23993 } 23994 if p != x1.Args[1] { 23995 break 23996 } 23997 if mem != x1.Args[2] { 23998 break 23999 } 24000 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24001 break 24002 } 24003 b = mergePoint(b, x0, x1) 24004 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 24005 v.reset(OpCopy) 24006 v.AddArg(v0) 24007 v0.AuxInt 
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
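	// The next group applies the same idea one size up: two single-use
	// adjacent 16-bit loads, the upper one shifted left by 16, merge into
	// one 32-bit indexed load. Roughly, assuming the word loads were
	// themselves produced by the byte-merging rules above (sketch only;
	// load32 is a hypothetical name):
	//
	//	func load32(b []byte, i int) uint32 {
	//		lo := uint32(b[i]) | uint32(b[i+1])<<8
	//		hi := uint32(b[i+2]) | uint32(b[i+3])<<8
	//		return lo | hi<<16 // one MOVLloadidx1 after rewriting
	//	}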
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
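	// From here the ladder is applied once more to produce full 64-bit
	// loads: two adjacent single-use 32-bit loads, the upper one shifted
	// left by 32, collapse into a single MOVQloadidx1. The merged load is
	// created at mergePoint(b, x0, x1); if no suitable block exists,
	// mergePoint returns nil and the condition rejects the rewrite.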
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
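	// The remaining ORQ rules handle byte pairs assembled at an arbitrary
	// 16-bit-aligned position j0 inside a longer OR chain: s0 and s1 hold
	// consecutive bytes shifted by j0 and j0+8, so the pair collapses into
	// one 16-bit load shifted by j0, re-ORed with the rest of the chain y.
	// In rule syntax, the shape being matched is:
	//
	//	(ORQ (SHLQconst [j0+8] (MOVBloadidx1 [i0+1] ...))
	//	     (ORQ (SHLQconst [j0] (MOVBloadidx1 [i0] ...)) y))
	//	-> (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] ...)) y)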
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
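	// A note on the helpers used by these conditions: mergePoint(b, x0, x1)
	// walks the dominator tree to find a block where a value combining x0
	// and x1 can legally live, returning nil when there is none, and
	// clobber marks a matched value as dead (resetting it to OpInvalid)
	// while always reporting true, which is why it can appear inside the
	// boolean condition. The rewrite then turns v into an OpCopy of the
	// merged value built at that point.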
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
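// rewriteValueAMD64_OpAMD64ORQ_70 continues the same family of merges; as
// the _40/_50/_60/_70 suffixes suggest, the rule generator splits the ORQ
// cases into functions of at most ten rules each to keep function bodies
// small, and the dispatcher chains them with || so the first rule that
// fires wins.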
func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ =
x1.Args[2] 26251 if idx != x1.Args[0] { 26252 break 26253 } 26254 if p != x1.Args[1] { 26255 break 26256 } 26257 if mem != x1.Args[2] { 26258 break 26259 } 26260 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26261 break 26262 } 26263 b = mergePoint(b, x0, x1) 26264 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26265 v.reset(OpCopy) 26266 v.AddArg(v0) 26267 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26268 v1.AuxInt = j0 26269 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26270 v2.AuxInt = i0 26271 v2.Aux = s 26272 v2.AddArg(p) 26273 v2.AddArg(idx) 26274 v2.AddArg(mem) 26275 v1.AddArg(v2) 26276 v0.AddArg(v1) 26277 v0.AddArg(y) 26278 return true 26279 } 26280 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 26281 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26282 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26283 for { 26284 _ = v.Args[1] 26285 s1 := v.Args[0] 26286 if s1.Op != OpAMD64SHLQconst { 26287 break 26288 } 26289 j1 := s1.AuxInt 26290 x1 := s1.Args[0] 26291 if x1.Op != OpAMD64MOVWloadidx1 { 26292 break 26293 } 26294 i1 := x1.AuxInt 26295 s := x1.Aux 26296 _ = x1.Args[2] 26297 p := x1.Args[0] 26298 idx := x1.Args[1] 26299 mem := x1.Args[2] 26300 or := v.Args[1] 26301 if or.Op != OpAMD64ORQ { 26302 break 26303 } 26304 _ = or.Args[1] 26305 s0 := or.Args[0] 26306 if s0.Op != OpAMD64SHLQconst { 26307 break 26308 } 26309 j0 := s0.AuxInt 26310 x0 := s0.Args[0] 26311 if x0.Op != OpAMD64MOVWloadidx1 { 26312 break 26313 } 26314 i0 := x0.AuxInt 26315 if x0.Aux != s { 26316 break 26317 } 26318 _ = x0.Args[2] 26319 if p != x0.Args[0] { 26320 break 26321 } 26322 if idx != x0.Args[1] { 26323 break 26324 } 26325 if mem != x0.Args[2] { 26326 break 26327 } 26328 y := or.Args[1] 26329 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26330 break 26331 } 26332 b = mergePoint(b, x0, x1) 26333 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26334 v.reset(OpCopy) 26335 v.AddArg(v0) 26336 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26337 v1.AuxInt = j0 26338 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26339 v2.AuxInt = i0 26340 v2.Aux = s 26341 v2.AddArg(p) 26342 v2.AddArg(idx) 26343 v2.AddArg(mem) 26344 v1.AddArg(v2) 26345 v0.AddArg(v1) 26346 v0.AddArg(y) 26347 return true 26348 } 26349 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 26350 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26351 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26352 for { 26353 _ = v.Args[1] 26354 s1 := v.Args[0] 26355 if s1.Op != OpAMD64SHLQconst { 
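// The rules in this stretch fuse two adjacent little-endian indexed loads
// into one wider load: a pair of MOVBloadidx1 at offsets i0 and i0+1 becomes
// a single MOVWloadidx1, and a pair of MOVWloadidx1 at i0 and i0+2 becomes a
// MOVLloadidx1. The shift conditions (j1 == j0+8 with j0 % 16 == 0, or
// j1 == j0+16 with j0 % 32 == 0) guarantee the two halves land in adjacent,
// aligned bit positions of the ORQ result. A minimal sketch of the bit
// identity being exploited (illustrative only; combineLE16/combineLE32 are
// hypothetical helpers, not part of this generated file):
//
//	func combineLE16(b0, b1 byte) uint16 { // the bits MOVWloadidx1 reads
//		return uint16(b0) | uint16(b1)<<8
//	}
//
//	func combineLE32(w0, w1 uint16) uint32 { // the bits MOVLloadidx1 reads
//		return uint32(w0) | uint32(w1)<<16
//	}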
26356 break 26357 } 26358 j1 := s1.AuxInt 26359 x1 := s1.Args[0] 26360 if x1.Op != OpAMD64MOVWloadidx1 { 26361 break 26362 } 26363 i1 := x1.AuxInt 26364 s := x1.Aux 26365 _ = x1.Args[2] 26366 idx := x1.Args[0] 26367 p := x1.Args[1] 26368 mem := x1.Args[2] 26369 or := v.Args[1] 26370 if or.Op != OpAMD64ORQ { 26371 break 26372 } 26373 _ = or.Args[1] 26374 s0 := or.Args[0] 26375 if s0.Op != OpAMD64SHLQconst { 26376 break 26377 } 26378 j0 := s0.AuxInt 26379 x0 := s0.Args[0] 26380 if x0.Op != OpAMD64MOVWloadidx1 { 26381 break 26382 } 26383 i0 := x0.AuxInt 26384 if x0.Aux != s { 26385 break 26386 } 26387 _ = x0.Args[2] 26388 if p != x0.Args[0] { 26389 break 26390 } 26391 if idx != x0.Args[1] { 26392 break 26393 } 26394 if mem != x0.Args[2] { 26395 break 26396 } 26397 y := or.Args[1] 26398 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26399 break 26400 } 26401 b = mergePoint(b, x0, x1) 26402 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26403 v.reset(OpCopy) 26404 v.AddArg(v0) 26405 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26406 v1.AuxInt = j0 26407 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26408 v2.AuxInt = i0 26409 v2.Aux = s 26410 v2.AddArg(p) 26411 v2.AddArg(idx) 26412 v2.AddArg(mem) 26413 v1.AddArg(v2) 26414 v0.AddArg(v1) 26415 v0.AddArg(y) 26416 return true 26417 } 26418 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 26419 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26420 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26421 for { 26422 _ = v.Args[1] 26423 s1 := v.Args[0] 26424 if s1.Op != OpAMD64SHLQconst { 26425 break 26426 } 26427 j1 := s1.AuxInt 26428 x1 := s1.Args[0] 26429 if x1.Op != OpAMD64MOVWloadidx1 { 26430 break 26431 } 26432 i1 := x1.AuxInt 26433 s := x1.Aux 26434 _ = x1.Args[2] 26435 p := x1.Args[0] 26436 idx := x1.Args[1] 26437 mem := x1.Args[2] 26438 or := v.Args[1] 26439 if or.Op != OpAMD64ORQ { 26440 break 26441 } 26442 _ = or.Args[1] 26443 s0 := or.Args[0] 26444 if s0.Op != OpAMD64SHLQconst { 26445 break 26446 } 26447 j0 := s0.AuxInt 26448 x0 := s0.Args[0] 26449 if x0.Op != OpAMD64MOVWloadidx1 { 26450 break 26451 } 26452 i0 := x0.AuxInt 26453 if x0.Aux != s { 26454 break 26455 } 26456 _ = x0.Args[2] 26457 if idx != x0.Args[0] { 26458 break 26459 } 26460 if p != x0.Args[1] { 26461 break 26462 } 26463 if mem != x0.Args[2] { 26464 break 26465 } 26466 y := or.Args[1] 26467 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26468 break 26469 } 26470 b = mergePoint(b, x0, x1) 26471 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26472 v.reset(OpCopy) 26473 v.AddArg(v0) 26474 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26475 v1.AuxInt = j0 26476 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26477 v2.AuxInt = i0 26478 v2.Aux = s 26479 v2.AddArg(p) 26480 v2.AddArg(idx) 26481 v2.AddArg(mem) 26482 v1.AddArg(v2) 26483 v0.AddArg(v1) 26484 
v0.AddArg(y) 26485 return true 26486 } 26487 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 26488 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26489 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26490 for { 26491 _ = v.Args[1] 26492 s1 := v.Args[0] 26493 if s1.Op != OpAMD64SHLQconst { 26494 break 26495 } 26496 j1 := s1.AuxInt 26497 x1 := s1.Args[0] 26498 if x1.Op != OpAMD64MOVWloadidx1 { 26499 break 26500 } 26501 i1 := x1.AuxInt 26502 s := x1.Aux 26503 _ = x1.Args[2] 26504 idx := x1.Args[0] 26505 p := x1.Args[1] 26506 mem := x1.Args[2] 26507 or := v.Args[1] 26508 if or.Op != OpAMD64ORQ { 26509 break 26510 } 26511 _ = or.Args[1] 26512 s0 := or.Args[0] 26513 if s0.Op != OpAMD64SHLQconst { 26514 break 26515 } 26516 j0 := s0.AuxInt 26517 x0 := s0.Args[0] 26518 if x0.Op != OpAMD64MOVWloadidx1 { 26519 break 26520 } 26521 i0 := x0.AuxInt 26522 if x0.Aux != s { 26523 break 26524 } 26525 _ = x0.Args[2] 26526 if idx != x0.Args[0] { 26527 break 26528 } 26529 if p != x0.Args[1] { 26530 break 26531 } 26532 if mem != x0.Args[2] { 26533 break 26534 } 26535 y := or.Args[1] 26536 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26537 break 26538 } 26539 b = mergePoint(b, x0, x1) 26540 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26541 v.reset(OpCopy) 26542 v.AddArg(v0) 26543 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26544 v1.AuxInt = j0 26545 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26546 v2.AuxInt = i0 26547 v2.Aux = s 26548 v2.AddArg(p) 26549 v2.AddArg(idx) 26550 v2.AddArg(mem) 26551 v1.AddArg(v2) 26552 v0.AddArg(v1) 26553 v0.AddArg(y) 26554 return true 26555 } 26556 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 26557 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26558 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26559 for { 26560 _ = v.Args[1] 26561 s1 := v.Args[0] 26562 if s1.Op != OpAMD64SHLQconst { 26563 break 26564 } 26565 j1 := s1.AuxInt 26566 x1 := s1.Args[0] 26567 if x1.Op != OpAMD64MOVWloadidx1 { 26568 break 26569 } 26570 i1 := x1.AuxInt 26571 s := x1.Aux 26572 _ = x1.Args[2] 26573 p := x1.Args[0] 26574 idx := x1.Args[1] 26575 mem := x1.Args[2] 26576 or := v.Args[1] 26577 if or.Op != OpAMD64ORQ { 26578 break 26579 } 26580 _ = or.Args[1] 26581 y := or.Args[0] 26582 s0 := or.Args[1] 26583 if s0.Op != OpAMD64SHLQconst { 26584 break 26585 } 26586 j0 := s0.AuxInt 26587 x0 := s0.Args[0] 26588 if x0.Op != OpAMD64MOVWloadidx1 { 26589 break 26590 } 26591 i0 := x0.AuxInt 26592 if x0.Aux != s { 26593 break 26594 } 26595 _ = x0.Args[2] 26596 if p != x0.Args[0] { 26597 break 26598 } 26599 if idx != x0.Args[1] { 26600 break 26601 } 26602 if mem != x0.Args[2] { 26603 break 26604 } 26605 if !(i1 == i0+2 && j1 == j0+16 
&& j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26606 break 26607 } 26608 b = mergePoint(b, x0, x1) 26609 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26610 v.reset(OpCopy) 26611 v.AddArg(v0) 26612 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26613 v1.AuxInt = j0 26614 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26615 v2.AuxInt = i0 26616 v2.Aux = s 26617 v2.AddArg(p) 26618 v2.AddArg(idx) 26619 v2.AddArg(mem) 26620 v1.AddArg(v2) 26621 v0.AddArg(v1) 26622 v0.AddArg(y) 26623 return true 26624 } 26625 return false 26626 } 26627 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { 26628 b := v.Block 26629 _ = b 26630 typ := &b.Func.Config.Types 26631 _ = typ 26632 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 26633 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26634 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26635 for { 26636 _ = v.Args[1] 26637 s1 := v.Args[0] 26638 if s1.Op != OpAMD64SHLQconst { 26639 break 26640 } 26641 j1 := s1.AuxInt 26642 x1 := s1.Args[0] 26643 if x1.Op != OpAMD64MOVWloadidx1 { 26644 break 26645 } 26646 i1 := x1.AuxInt 26647 s := x1.Aux 26648 _ = x1.Args[2] 26649 idx := x1.Args[0] 26650 p := x1.Args[1] 26651 mem := x1.Args[2] 26652 or := v.Args[1] 26653 if or.Op != OpAMD64ORQ { 26654 break 26655 } 26656 _ = or.Args[1] 26657 y := or.Args[0] 26658 s0 := or.Args[1] 26659 if s0.Op != OpAMD64SHLQconst { 26660 break 26661 } 26662 j0 := s0.AuxInt 26663 x0 := s0.Args[0] 26664 if x0.Op != OpAMD64MOVWloadidx1 { 26665 break 26666 } 26667 i0 := x0.AuxInt 26668 if x0.Aux != s { 26669 break 26670 } 26671 _ = x0.Args[2] 26672 if p != x0.Args[0] { 26673 break 26674 } 26675 if idx != x0.Args[1] { 26676 break 26677 } 26678 if mem != x0.Args[2] { 26679 break 26680 } 26681 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26682 break 26683 } 26684 b = mergePoint(b, x0, x1) 26685 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26686 v.reset(OpCopy) 26687 v.AddArg(v0) 26688 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26689 v1.AuxInt = j0 26690 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26691 v2.AuxInt = i0 26692 v2.Aux = s 26693 v2.AddArg(p) 26694 v2.AddArg(idx) 26695 v2.AddArg(mem) 26696 v1.AddArg(v2) 26697 v0.AddArg(v1) 26698 v0.AddArg(y) 26699 return true 26700 } 26701 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 26702 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26703 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26704 for { 26705 _ = v.Args[1] 26706 s1 := v.Args[0] 26707 if s1.Op != OpAMD64SHLQconst { 26708 break 26709 } 26710 
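// ORQ is commutative, and the (p, idx) address pair inside an idx1 load can
// appear in either order, so the rule generator emits a separate, otherwise
// identical match for every operand permutation: the SHLQconst before or
// after the inner ORQ, y on either side of it, and p/idx swapped in each
// load. Every variant canonicalizes to the same result, with the merged
// load taking (p, idx) in that order. Source-level shape these matches are
// aimed at (a hypothetical example, not taken from this file):
//
//	// v := uint64(binary.LittleEndian.Uint16(buf[i:]))<<j0 |
//	//	uint64(binary.LittleEndian.Uint16(buf[i+2:]))<<(j0+16) | rest
//	// collapses to one 32-bit load shifted by j0, OR'ed with rest.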
j1 := s1.AuxInt 26711 x1 := s1.Args[0] 26712 if x1.Op != OpAMD64MOVWloadidx1 { 26713 break 26714 } 26715 i1 := x1.AuxInt 26716 s := x1.Aux 26717 _ = x1.Args[2] 26718 p := x1.Args[0] 26719 idx := x1.Args[1] 26720 mem := x1.Args[2] 26721 or := v.Args[1] 26722 if or.Op != OpAMD64ORQ { 26723 break 26724 } 26725 _ = or.Args[1] 26726 y := or.Args[0] 26727 s0 := or.Args[1] 26728 if s0.Op != OpAMD64SHLQconst { 26729 break 26730 } 26731 j0 := s0.AuxInt 26732 x0 := s0.Args[0] 26733 if x0.Op != OpAMD64MOVWloadidx1 { 26734 break 26735 } 26736 i0 := x0.AuxInt 26737 if x0.Aux != s { 26738 break 26739 } 26740 _ = x0.Args[2] 26741 if idx != x0.Args[0] { 26742 break 26743 } 26744 if p != x0.Args[1] { 26745 break 26746 } 26747 if mem != x0.Args[2] { 26748 break 26749 } 26750 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26751 break 26752 } 26753 b = mergePoint(b, x0, x1) 26754 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26755 v.reset(OpCopy) 26756 v.AddArg(v0) 26757 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26758 v1.AuxInt = j0 26759 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26760 v2.AuxInt = i0 26761 v2.Aux = s 26762 v2.AddArg(p) 26763 v2.AddArg(idx) 26764 v2.AddArg(mem) 26765 v1.AddArg(v2) 26766 v0.AddArg(v1) 26767 v0.AddArg(y) 26768 return true 26769 } 26770 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 26771 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26772 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26773 for { 26774 _ = v.Args[1] 26775 s1 := v.Args[0] 26776 if s1.Op != OpAMD64SHLQconst { 26777 break 26778 } 26779 j1 := s1.AuxInt 26780 x1 := s1.Args[0] 26781 if x1.Op != OpAMD64MOVWloadidx1 { 26782 break 26783 } 26784 i1 := x1.AuxInt 26785 s := x1.Aux 26786 _ = x1.Args[2] 26787 idx := x1.Args[0] 26788 p := x1.Args[1] 26789 mem := x1.Args[2] 26790 or := v.Args[1] 26791 if or.Op != OpAMD64ORQ { 26792 break 26793 } 26794 _ = or.Args[1] 26795 y := or.Args[0] 26796 s0 := or.Args[1] 26797 if s0.Op != OpAMD64SHLQconst { 26798 break 26799 } 26800 j0 := s0.AuxInt 26801 x0 := s0.Args[0] 26802 if x0.Op != OpAMD64MOVWloadidx1 { 26803 break 26804 } 26805 i0 := x0.AuxInt 26806 if x0.Aux != s { 26807 break 26808 } 26809 _ = x0.Args[2] 26810 if idx != x0.Args[0] { 26811 break 26812 } 26813 if p != x0.Args[1] { 26814 break 26815 } 26816 if mem != x0.Args[2] { 26817 break 26818 } 26819 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26820 break 26821 } 26822 b = mergePoint(b, x0, x1) 26823 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26824 v.reset(OpCopy) 26825 v.AddArg(v0) 26826 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26827 v1.AuxInt = j0 26828 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26829 v2.AuxInt = i0 26830 v2.Aux = s 26831 v2.AddArg(p) 26832 v2.AddArg(idx) 26833 v2.AddArg(mem) 26834 v1.AddArg(v2) 26835 v0.AddArg(v1) 26836 v0.AddArg(y) 26837 return 
true 26838 } 26839 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26840 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26841 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26842 for { 26843 _ = v.Args[1] 26844 or := v.Args[0] 26845 if or.Op != OpAMD64ORQ { 26846 break 26847 } 26848 _ = or.Args[1] 26849 s0 := or.Args[0] 26850 if s0.Op != OpAMD64SHLQconst { 26851 break 26852 } 26853 j0 := s0.AuxInt 26854 x0 := s0.Args[0] 26855 if x0.Op != OpAMD64MOVWloadidx1 { 26856 break 26857 } 26858 i0 := x0.AuxInt 26859 s := x0.Aux 26860 _ = x0.Args[2] 26861 p := x0.Args[0] 26862 idx := x0.Args[1] 26863 mem := x0.Args[2] 26864 y := or.Args[1] 26865 s1 := v.Args[1] 26866 if s1.Op != OpAMD64SHLQconst { 26867 break 26868 } 26869 j1 := s1.AuxInt 26870 x1 := s1.Args[0] 26871 if x1.Op != OpAMD64MOVWloadidx1 { 26872 break 26873 } 26874 i1 := x1.AuxInt 26875 if x1.Aux != s { 26876 break 26877 } 26878 _ = x1.Args[2] 26879 if p != x1.Args[0] { 26880 break 26881 } 26882 if idx != x1.Args[1] { 26883 break 26884 } 26885 if mem != x1.Args[2] { 26886 break 26887 } 26888 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26889 break 26890 } 26891 b = mergePoint(b, x0, x1) 26892 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26893 v.reset(OpCopy) 26894 v.AddArg(v0) 26895 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26896 v1.AuxInt = j0 26897 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26898 v2.AuxInt = i0 26899 v2.Aux = s 26900 v2.AddArg(p) 26901 v2.AddArg(idx) 26902 v2.AddArg(mem) 26903 v1.AddArg(v2) 26904 v0.AddArg(v1) 26905 v0.AddArg(y) 26906 return true 26907 } 26908 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26909 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26910 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26911 for { 26912 _ = v.Args[1] 26913 or := v.Args[0] 26914 if or.Op != OpAMD64ORQ { 26915 break 26916 } 26917 _ = or.Args[1] 26918 s0 := or.Args[0] 26919 if s0.Op != OpAMD64SHLQconst { 26920 break 26921 } 26922 j0 := s0.AuxInt 26923 x0 := s0.Args[0] 26924 if x0.Op != OpAMD64MOVWloadidx1 { 26925 break 26926 } 26927 i0 := x0.AuxInt 26928 s := x0.Aux 26929 _ = x0.Args[2] 26930 idx := x0.Args[0] 26931 p := x0.Args[1] 26932 mem := x0.Args[2] 26933 y := or.Args[1] 26934 s1 := v.Args[1] 26935 if s1.Op != OpAMD64SHLQconst { 26936 break 26937 } 26938 j1 := s1.AuxInt 26939 x1 := s1.Args[0] 26940 if x1.Op != OpAMD64MOVWloadidx1 { 26941 break 26942 } 26943 i1 := x1.AuxInt 26944 if x1.Aux != s { 26945 break 26946 } 26947 _ = x1.Args[2] 26948 if p != x1.Args[0] { 26949 break 26950 } 26951 if idx != x1.Args[1] { 26952 break 26953 } 26954 if mem != x1.Args[2] { 26955 break 26956 } 26957 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 
1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26958 break 26959 } 26960 b = mergePoint(b, x0, x1) 26961 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26962 v.reset(OpCopy) 26963 v.AddArg(v0) 26964 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26965 v1.AuxInt = j0 26966 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26967 v2.AuxInt = i0 26968 v2.Aux = s 26969 v2.AddArg(p) 26970 v2.AddArg(idx) 26971 v2.AddArg(mem) 26972 v1.AddArg(v2) 26973 v0.AddArg(v1) 26974 v0.AddArg(y) 26975 return true 26976 } 26977 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26978 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26979 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26980 for { 26981 _ = v.Args[1] 26982 or := v.Args[0] 26983 if or.Op != OpAMD64ORQ { 26984 break 26985 } 26986 _ = or.Args[1] 26987 y := or.Args[0] 26988 s0 := or.Args[1] 26989 if s0.Op != OpAMD64SHLQconst { 26990 break 26991 } 26992 j0 := s0.AuxInt 26993 x0 := s0.Args[0] 26994 if x0.Op != OpAMD64MOVWloadidx1 { 26995 break 26996 } 26997 i0 := x0.AuxInt 26998 s := x0.Aux 26999 _ = x0.Args[2] 27000 p := x0.Args[0] 27001 idx := x0.Args[1] 27002 mem := x0.Args[2] 27003 s1 := v.Args[1] 27004 if s1.Op != OpAMD64SHLQconst { 27005 break 27006 } 27007 j1 := s1.AuxInt 27008 x1 := s1.Args[0] 27009 if x1.Op != OpAMD64MOVWloadidx1 { 27010 break 27011 } 27012 i1 := x1.AuxInt 27013 if x1.Aux != s { 27014 break 27015 } 27016 _ = x1.Args[2] 27017 if p != x1.Args[0] { 27018 break 27019 } 27020 if idx != x1.Args[1] { 27021 break 27022 } 27023 if mem != x1.Args[2] { 27024 break 27025 } 27026 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27027 break 27028 } 27029 b = mergePoint(b, x0, x1) 27030 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27031 v.reset(OpCopy) 27032 v.AddArg(v0) 27033 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27034 v1.AuxInt = j0 27035 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27036 v2.AuxInt = i0 27037 v2.Aux = s 27038 v2.AddArg(p) 27039 v2.AddArg(idx) 27040 v2.AddArg(mem) 27041 v1.AddArg(v2) 27042 v0.AddArg(v1) 27043 v0.AddArg(y) 27044 return true 27045 } 27046 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 27047 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27048 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27049 for { 27050 _ = v.Args[1] 27051 or := v.Args[0] 27052 if or.Op != OpAMD64ORQ { 27053 break 27054 } 27055 _ = or.Args[1] 27056 y := or.Args[0] 27057 s0 := or.Args[1] 27058 if s0.Op != OpAMD64SHLQconst { 27059 break 27060 } 27061 j0 := s0.AuxInt 27062 x0 := s0.Args[0] 27063 if x0.Op != OpAMD64MOVWloadidx1 
{ 27064 break 27065 } 27066 i0 := x0.AuxInt 27067 s := x0.Aux 27068 _ = x0.Args[2] 27069 idx := x0.Args[0] 27070 p := x0.Args[1] 27071 mem := x0.Args[2] 27072 s1 := v.Args[1] 27073 if s1.Op != OpAMD64SHLQconst { 27074 break 27075 } 27076 j1 := s1.AuxInt 27077 x1 := s1.Args[0] 27078 if x1.Op != OpAMD64MOVWloadidx1 { 27079 break 27080 } 27081 i1 := x1.AuxInt 27082 if x1.Aux != s { 27083 break 27084 } 27085 _ = x1.Args[2] 27086 if p != x1.Args[0] { 27087 break 27088 } 27089 if idx != x1.Args[1] { 27090 break 27091 } 27092 if mem != x1.Args[2] { 27093 break 27094 } 27095 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27096 break 27097 } 27098 b = mergePoint(b, x0, x1) 27099 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27100 v.reset(OpCopy) 27101 v.AddArg(v0) 27102 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27103 v1.AuxInt = j0 27104 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27105 v2.AuxInt = i0 27106 v2.Aux = s 27107 v2.AddArg(p) 27108 v2.AddArg(idx) 27109 v2.AddArg(mem) 27110 v1.AddArg(v2) 27111 v0.AddArg(v1) 27112 v0.AddArg(y) 27113 return true 27114 } 27115 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27116 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27117 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27118 for { 27119 _ = v.Args[1] 27120 or := v.Args[0] 27121 if or.Op != OpAMD64ORQ { 27122 break 27123 } 27124 _ = or.Args[1] 27125 s0 := or.Args[0] 27126 if s0.Op != OpAMD64SHLQconst { 27127 break 27128 } 27129 j0 := s0.AuxInt 27130 x0 := s0.Args[0] 27131 if x0.Op != OpAMD64MOVWloadidx1 { 27132 break 27133 } 27134 i0 := x0.AuxInt 27135 s := x0.Aux 27136 _ = x0.Args[2] 27137 p := x0.Args[0] 27138 idx := x0.Args[1] 27139 mem := x0.Args[2] 27140 y := or.Args[1] 27141 s1 := v.Args[1] 27142 if s1.Op != OpAMD64SHLQconst { 27143 break 27144 } 27145 j1 := s1.AuxInt 27146 x1 := s1.Args[0] 27147 if x1.Op != OpAMD64MOVWloadidx1 { 27148 break 27149 } 27150 i1 := x1.AuxInt 27151 if x1.Aux != s { 27152 break 27153 } 27154 _ = x1.Args[2] 27155 if idx != x1.Args[0] { 27156 break 27157 } 27158 if p != x1.Args[1] { 27159 break 27160 } 27161 if mem != x1.Args[2] { 27162 break 27163 } 27164 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27165 break 27166 } 27167 b = mergePoint(b, x0, x1) 27168 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27169 v.reset(OpCopy) 27170 v.AddArg(v0) 27171 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27172 v1.AuxInt = j0 27173 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27174 v2.AuxInt = i0 27175 v2.Aux = s 27176 v2.AddArg(p) 27177 v2.AddArg(idx) 27178 v2.AddArg(mem) 27179 v1.AddArg(v2) 27180 v0.AddArg(v1) 27181 v0.AddArg(y) 27182 return true 27183 } 27184 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27185 // cond: i1 == i0+2 && j1 == 
j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27186 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27187 for { 27188 _ = v.Args[1] 27189 or := v.Args[0] 27190 if or.Op != OpAMD64ORQ { 27191 break 27192 } 27193 _ = or.Args[1] 27194 s0 := or.Args[0] 27195 if s0.Op != OpAMD64SHLQconst { 27196 break 27197 } 27198 j0 := s0.AuxInt 27199 x0 := s0.Args[0] 27200 if x0.Op != OpAMD64MOVWloadidx1 { 27201 break 27202 } 27203 i0 := x0.AuxInt 27204 s := x0.Aux 27205 _ = x0.Args[2] 27206 idx := x0.Args[0] 27207 p := x0.Args[1] 27208 mem := x0.Args[2] 27209 y := or.Args[1] 27210 s1 := v.Args[1] 27211 if s1.Op != OpAMD64SHLQconst { 27212 break 27213 } 27214 j1 := s1.AuxInt 27215 x1 := s1.Args[0] 27216 if x1.Op != OpAMD64MOVWloadidx1 { 27217 break 27218 } 27219 i1 := x1.AuxInt 27220 if x1.Aux != s { 27221 break 27222 } 27223 _ = x1.Args[2] 27224 if idx != x1.Args[0] { 27225 break 27226 } 27227 if p != x1.Args[1] { 27228 break 27229 } 27230 if mem != x1.Args[2] { 27231 break 27232 } 27233 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27234 break 27235 } 27236 b = mergePoint(b, x0, x1) 27237 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27238 v.reset(OpCopy) 27239 v.AddArg(v0) 27240 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27241 v1.AuxInt = j0 27242 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27243 v2.AuxInt = i0 27244 v2.Aux = s 27245 v2.AddArg(p) 27246 v2.AddArg(idx) 27247 v2.AddArg(mem) 27248 v1.AddArg(v2) 27249 v0.AddArg(v1) 27250 v0.AddArg(y) 27251 return true 27252 } 27253 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27254 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27255 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27256 for { 27257 _ = v.Args[1] 27258 or := v.Args[0] 27259 if or.Op != OpAMD64ORQ { 27260 break 27261 } 27262 _ = or.Args[1] 27263 y := or.Args[0] 27264 s0 := or.Args[1] 27265 if s0.Op != OpAMD64SHLQconst { 27266 break 27267 } 27268 j0 := s0.AuxInt 27269 x0 := s0.Args[0] 27270 if x0.Op != OpAMD64MOVWloadidx1 { 27271 break 27272 } 27273 i0 := x0.AuxInt 27274 s := x0.Aux 27275 _ = x0.Args[2] 27276 p := x0.Args[0] 27277 idx := x0.Args[1] 27278 mem := x0.Args[2] 27279 s1 := v.Args[1] 27280 if s1.Op != OpAMD64SHLQconst { 27281 break 27282 } 27283 j1 := s1.AuxInt 27284 x1 := s1.Args[0] 27285 if x1.Op != OpAMD64MOVWloadidx1 { 27286 break 27287 } 27288 i1 := x1.AuxInt 27289 if x1.Aux != s { 27290 break 27291 } 27292 _ = x1.Args[2] 27293 if idx != x1.Args[0] { 27294 break 27295 } 27296 if p != x1.Args[1] { 27297 break 27298 } 27299 if mem != x1.Args[2] { 27300 break 27301 } 27302 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27303 break 27304 
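// Tail common to every successful match in these functions: mergePoint(b,
// x0, x1) picks a block where the results of both original loads are
// available, and the replacement tree is built there; v.reset(OpCopy) turns
// the matched value into a forwarding copy of that tree; clobber resets each
// consumed value to OpInvalid so the deadcode pass removes it (clobber
// returns true, which is why it can sit inside the boolean cond). Condensed
// sketch of that tail, mirroring the generated statements nearby
// (commentary only):
//
//	b = mergePoint(b, x0, x1)         // landing block for the merged load
//	v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
//	v.reset(OpCopy)                   // v now just forwards v0
//	v.AddArg(v0)                      // SHLQconst/MOVLloadidx1 tree follows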
} 27305 b = mergePoint(b, x0, x1) 27306 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27307 v.reset(OpCopy) 27308 v.AddArg(v0) 27309 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27310 v1.AuxInt = j0 27311 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27312 v2.AuxInt = i0 27313 v2.Aux = s 27314 v2.AddArg(p) 27315 v2.AddArg(idx) 27316 v2.AddArg(mem) 27317 v1.AddArg(v2) 27318 v0.AddArg(v1) 27319 v0.AddArg(y) 27320 return true 27321 } 27322 return false 27323 } 27324 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { 27325 b := v.Block 27326 _ = b 27327 typ := &b.Func.Config.Types 27328 _ = typ 27329 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27330 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27331 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27332 for { 27333 _ = v.Args[1] 27334 or := v.Args[0] 27335 if or.Op != OpAMD64ORQ { 27336 break 27337 } 27338 _ = or.Args[1] 27339 y := or.Args[0] 27340 s0 := or.Args[1] 27341 if s0.Op != OpAMD64SHLQconst { 27342 break 27343 } 27344 j0 := s0.AuxInt 27345 x0 := s0.Args[0] 27346 if x0.Op != OpAMD64MOVWloadidx1 { 27347 break 27348 } 27349 i0 := x0.AuxInt 27350 s := x0.Aux 27351 _ = x0.Args[2] 27352 idx := x0.Args[0] 27353 p := x0.Args[1] 27354 mem := x0.Args[2] 27355 s1 := v.Args[1] 27356 if s1.Op != OpAMD64SHLQconst { 27357 break 27358 } 27359 j1 := s1.AuxInt 27360 x1 := s1.Args[0] 27361 if x1.Op != OpAMD64MOVWloadidx1 { 27362 break 27363 } 27364 i1 := x1.AuxInt 27365 if x1.Aux != s { 27366 break 27367 } 27368 _ = x1.Args[2] 27369 if idx != x1.Args[0] { 27370 break 27371 } 27372 if p != x1.Args[1] { 27373 break 27374 } 27375 if mem != x1.Args[2] { 27376 break 27377 } 27378 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27379 break 27380 } 27381 b = mergePoint(b, x0, x1) 27382 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27383 v.reset(OpCopy) 27384 v.AddArg(v0) 27385 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27386 v1.AuxInt = j0 27387 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27388 v2.AuxInt = i0 27389 v2.Aux = s 27390 v2.AddArg(p) 27391 v2.AddArg(idx) 27392 v2.AddArg(mem) 27393 v1.AddArg(v2) 27394 v0.AddArg(v1) 27395 v0.AddArg(y) 27396 return true 27397 } 27398 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 27399 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27400 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 27401 for { 27402 _ = v.Args[1] 27403 x1 := v.Args[0] 27404 if x1.Op != OpAMD64MOVBload { 27405 break 27406 } 27407 i1 := x1.AuxInt 27408 s := x1.Aux 27409 _ = x1.Args[1] 27410 p := x1.Args[0] 27411 mem := x1.Args[1] 27412 sh := v.Args[1] 27413 if sh.Op != OpAMD64SHLQconst { 27414 break 27415 } 27416 if sh.AuxInt != 8 { 27417 break 27418 } 27419 x0 := sh.Args[0] 27420 if x0.Op != OpAMD64MOVBload { 27421 break 27422 } 27423 i0 := x0.AuxInt 27424 if x0.Aux != s { 27425 break 27426 } 27427 _ = 
x0.Args[1] 27428 if p != x0.Args[0] { 27429 break 27430 } 27431 if mem != x0.Args[1] { 27432 break 27433 } 27434 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27435 break 27436 } 27437 b = mergePoint(b, x0, x1) 27438 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27439 v.reset(OpCopy) 27440 v.AddArg(v0) 27441 v0.AuxInt = 8 27442 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27443 v1.AuxInt = i0 27444 v1.Aux = s 27445 v1.AddArg(p) 27446 v1.AddArg(mem) 27447 v0.AddArg(v1) 27448 return true 27449 } 27450 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 27451 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27452 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 27453 for { 27454 _ = v.Args[1] 27455 sh := v.Args[0] 27456 if sh.Op != OpAMD64SHLQconst { 27457 break 27458 } 27459 if sh.AuxInt != 8 { 27460 break 27461 } 27462 x0 := sh.Args[0] 27463 if x0.Op != OpAMD64MOVBload { 27464 break 27465 } 27466 i0 := x0.AuxInt 27467 s := x0.Aux 27468 _ = x0.Args[1] 27469 p := x0.Args[0] 27470 mem := x0.Args[1] 27471 x1 := v.Args[1] 27472 if x1.Op != OpAMD64MOVBload { 27473 break 27474 } 27475 i1 := x1.AuxInt 27476 if x1.Aux != s { 27477 break 27478 } 27479 _ = x1.Args[1] 27480 if p != x1.Args[0] { 27481 break 27482 } 27483 if mem != x1.Args[1] { 27484 break 27485 } 27486 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27487 break 27488 } 27489 b = mergePoint(b, x0, x1) 27490 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27491 v.reset(OpCopy) 27492 v.AddArg(v0) 27493 v0.AuxInt = 8 27494 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27495 v1.AuxInt = i0 27496 v1.Aux = s 27497 v1.AddArg(p) 27498 v1.AddArg(mem) 27499 v0.AddArg(v1) 27500 return true 27501 } 27502 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 27503 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27504 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 27505 for { 27506 _ = v.Args[1] 27507 r1 := v.Args[0] 27508 if r1.Op != OpAMD64ROLWconst { 27509 break 27510 } 27511 if r1.AuxInt != 8 { 27512 break 27513 } 27514 x1 := r1.Args[0] 27515 if x1.Op != OpAMD64MOVWload { 27516 break 27517 } 27518 i1 := x1.AuxInt 27519 s := x1.Aux 27520 _ = x1.Args[1] 27521 p := x1.Args[0] 27522 mem := x1.Args[1] 27523 sh := v.Args[1] 27524 if sh.Op != OpAMD64SHLQconst { 27525 break 27526 } 27527 if sh.AuxInt != 16 { 27528 break 27529 } 27530 r0 := sh.Args[0] 27531 if r0.Op != OpAMD64ROLWconst { 27532 break 27533 } 27534 if r0.AuxInt != 8 { 27535 break 27536 } 27537 x0 := r0.Args[0] 27538 if x0.Op != OpAMD64MOVWload { 27539 break 27540 } 27541 i0 := x0.AuxInt 27542 if x0.Aux != s { 27543 break 27544 } 27545 _ = x0.Args[1] 27546 if p != x0.Args[0] { 27547 break 27548 } 27549 if mem != x0.Args[1] { 27550 break 27551 } 27552 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 
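// From here the rules handle the byte-swapped (big-endian) forms. A
// ROLWconst [8] of a 16-bit value is a byte swap, so two swapped word loads
// at i0 and i0+2, one under SHLQconst [16], merge into BSWAPL of a single
// 32-bit load. Sketch of the identity (illustrative only; loadBE32 is a
// hypothetical helper):
//
//	func loadBE32(b []byte) uint32 { // the bits BSWAPL(MOVLload) produces
//		_ = b[3] // bounds hint
//		return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
//	}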
27553 break 27554 } 27555 b = mergePoint(b, x0, x1) 27556 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27557 v.reset(OpCopy) 27558 v.AddArg(v0) 27559 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27560 v1.AuxInt = i0 27561 v1.Aux = s 27562 v1.AddArg(p) 27563 v1.AddArg(mem) 27564 v0.AddArg(v1) 27565 return true 27566 } 27567 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 27568 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27569 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 27570 for { 27571 _ = v.Args[1] 27572 sh := v.Args[0] 27573 if sh.Op != OpAMD64SHLQconst { 27574 break 27575 } 27576 if sh.AuxInt != 16 { 27577 break 27578 } 27579 r0 := sh.Args[0] 27580 if r0.Op != OpAMD64ROLWconst { 27581 break 27582 } 27583 if r0.AuxInt != 8 { 27584 break 27585 } 27586 x0 := r0.Args[0] 27587 if x0.Op != OpAMD64MOVWload { 27588 break 27589 } 27590 i0 := x0.AuxInt 27591 s := x0.Aux 27592 _ = x0.Args[1] 27593 p := x0.Args[0] 27594 mem := x0.Args[1] 27595 r1 := v.Args[1] 27596 if r1.Op != OpAMD64ROLWconst { 27597 break 27598 } 27599 if r1.AuxInt != 8 { 27600 break 27601 } 27602 x1 := r1.Args[0] 27603 if x1.Op != OpAMD64MOVWload { 27604 break 27605 } 27606 i1 := x1.AuxInt 27607 if x1.Aux != s { 27608 break 27609 } 27610 _ = x1.Args[1] 27611 if p != x1.Args[0] { 27612 break 27613 } 27614 if mem != x1.Args[1] { 27615 break 27616 } 27617 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27618 break 27619 } 27620 b = mergePoint(b, x0, x1) 27621 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27622 v.reset(OpCopy) 27623 v.AddArg(v0) 27624 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27625 v1.AuxInt = i0 27626 v1.Aux = s 27627 v1.AddArg(p) 27628 v1.AddArg(mem) 27629 v0.AddArg(v1) 27630 return true 27631 } 27632 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 27633 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27634 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 27635 for { 27636 _ = v.Args[1] 27637 r1 := v.Args[0] 27638 if r1.Op != OpAMD64BSWAPL { 27639 break 27640 } 27641 x1 := r1.Args[0] 27642 if x1.Op != OpAMD64MOVLload { 27643 break 27644 } 27645 i1 := x1.AuxInt 27646 s := x1.Aux 27647 _ = x1.Args[1] 27648 p := x1.Args[0] 27649 mem := x1.Args[1] 27650 sh := v.Args[1] 27651 if sh.Op != OpAMD64SHLQconst { 27652 break 27653 } 27654 if sh.AuxInt != 32 { 27655 break 27656 } 27657 r0 := sh.Args[0] 27658 if r0.Op != OpAMD64BSWAPL { 27659 break 27660 } 27661 x0 := r0.Args[0] 27662 if x0.Op != OpAMD64MOVLload { 27663 break 27664 } 27665 i0 := x0.AuxInt 27666 if x0.Aux != s { 27667 break 27668 } 27669 _ = x0.Args[1] 27670 if p != x0.Args[0] { 27671 break 27672 } 27673 if mem != x0.Args[1] { 27674 break 27675 } 27676 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27677 
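// Same shape one level up: the match in progress here folds two byte-swapped
// 32-bit halves, (BSWAPL x1) at i0+4 and (SHLQconst [32] (BSWAPL x0)) at i0,
// into BSWAPQ of a single MOVQload. Together with the descending-shift
// variants further on (j1 == j0-8 and j1 == j0-16, which match big-endian
// byte order inside an ORQ tree), this is how hand-written big-endian
// decoding compiles down to one load plus one BSWAPQ. Hypothetical source
// pattern targeted:
//
//	// u := uint64(binary.BigEndian.Uint32(b))<<32 |
//	//	uint64(binary.BigEndian.Uint32(b[4:]))
//	// becomes BSWAPQ(MOVQload) after these rewrites.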
break 27678 } 27679 b = mergePoint(b, x0, x1) 27680 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 27681 v.reset(OpCopy) 27682 v.AddArg(v0) 27683 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 27684 v1.AuxInt = i0 27685 v1.Aux = s 27686 v1.AddArg(p) 27687 v1.AddArg(mem) 27688 v0.AddArg(v1) 27689 return true 27690 } 27691 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 27692 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27693 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 27694 for { 27695 _ = v.Args[1] 27696 sh := v.Args[0] 27697 if sh.Op != OpAMD64SHLQconst { 27698 break 27699 } 27700 if sh.AuxInt != 32 { 27701 break 27702 } 27703 r0 := sh.Args[0] 27704 if r0.Op != OpAMD64BSWAPL { 27705 break 27706 } 27707 x0 := r0.Args[0] 27708 if x0.Op != OpAMD64MOVLload { 27709 break 27710 } 27711 i0 := x0.AuxInt 27712 s := x0.Aux 27713 _ = x0.Args[1] 27714 p := x0.Args[0] 27715 mem := x0.Args[1] 27716 r1 := v.Args[1] 27717 if r1.Op != OpAMD64BSWAPL { 27718 break 27719 } 27720 x1 := r1.Args[0] 27721 if x1.Op != OpAMD64MOVLload { 27722 break 27723 } 27724 i1 := x1.AuxInt 27725 if x1.Aux != s { 27726 break 27727 } 27728 _ = x1.Args[1] 27729 if p != x1.Args[0] { 27730 break 27731 } 27732 if mem != x1.Args[1] { 27733 break 27734 } 27735 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27736 break 27737 } 27738 b = mergePoint(b, x0, x1) 27739 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 27740 v.reset(OpCopy) 27741 v.AddArg(v0) 27742 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 27743 v1.AuxInt = i0 27744 v1.Aux = s 27745 v1.AddArg(p) 27746 v1.AddArg(mem) 27747 v0.AddArg(v1) 27748 return true 27749 } 27750 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 27751 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27752 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27753 for { 27754 _ = v.Args[1] 27755 s0 := v.Args[0] 27756 if s0.Op != OpAMD64SHLQconst { 27757 break 27758 } 27759 j0 := s0.AuxInt 27760 x0 := s0.Args[0] 27761 if x0.Op != OpAMD64MOVBload { 27762 break 27763 } 27764 i0 := x0.AuxInt 27765 s := x0.Aux 27766 _ = x0.Args[1] 27767 p := x0.Args[0] 27768 mem := x0.Args[1] 27769 or := v.Args[1] 27770 if or.Op != OpAMD64ORQ { 27771 break 27772 } 27773 _ = or.Args[1] 27774 s1 := or.Args[0] 27775 if s1.Op != OpAMD64SHLQconst { 27776 break 27777 } 27778 j1 := s1.AuxInt 27779 x1 := s1.Args[0] 27780 if x1.Op != OpAMD64MOVBload { 27781 break 27782 } 27783 i1 := x1.AuxInt 27784 if x1.Aux != s { 27785 break 27786 } 27787 _ = x1.Args[1] 27788 if p != x1.Args[0] { 27789 break 27790 } 27791 if mem != x1.Args[1] { 27792 break 27793 } 27794 y := or.Args[1] 27795 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or)) { 27796 break 27797 } 27798 b = mergePoint(b, x0, x1) 27799 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27800 v.reset(OpCopy) 27801 v.AddArg(v0) 27802 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27803 v1.AuxInt = j1 27804 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27805 v2.AuxInt = 8 27806 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27807 v3.AuxInt = i0 27808 v3.Aux = s 27809 v3.AddArg(p) 27810 v3.AddArg(mem) 27811 v2.AddArg(v3) 27812 v1.AddArg(v2) 27813 v0.AddArg(v1) 27814 v0.AddArg(y) 27815 return true 27816 } 27817 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 27818 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27819 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27820 for { 27821 _ = v.Args[1] 27822 s0 := v.Args[0] 27823 if s0.Op != OpAMD64SHLQconst { 27824 break 27825 } 27826 j0 := s0.AuxInt 27827 x0 := s0.Args[0] 27828 if x0.Op != OpAMD64MOVBload { 27829 break 27830 } 27831 i0 := x0.AuxInt 27832 s := x0.Aux 27833 _ = x0.Args[1] 27834 p := x0.Args[0] 27835 mem := x0.Args[1] 27836 or := v.Args[1] 27837 if or.Op != OpAMD64ORQ { 27838 break 27839 } 27840 _ = or.Args[1] 27841 y := or.Args[0] 27842 s1 := or.Args[1] 27843 if s1.Op != OpAMD64SHLQconst { 27844 break 27845 } 27846 j1 := s1.AuxInt 27847 x1 := s1.Args[0] 27848 if x1.Op != OpAMD64MOVBload { 27849 break 27850 } 27851 i1 := x1.AuxInt 27852 if x1.Aux != s { 27853 break 27854 } 27855 _ = x1.Args[1] 27856 if p != x1.Args[0] { 27857 break 27858 } 27859 if mem != x1.Args[1] { 27860 break 27861 } 27862 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27863 break 27864 } 27865 b = mergePoint(b, x0, x1) 27866 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27867 v.reset(OpCopy) 27868 v.AddArg(v0) 27869 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27870 v1.AuxInt = j1 27871 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27872 v2.AuxInt = 8 27873 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27874 v3.AuxInt = i0 27875 v3.Aux = s 27876 v3.AddArg(p) 27877 v3.AddArg(mem) 27878 v2.AddArg(v3) 27879 v1.AddArg(v2) 27880 v0.AddArg(v1) 27881 v0.AddArg(y) 27882 return true 27883 } 27884 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 27885 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27886 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27887 for { 27888 _ = v.Args[1] 27889 or := v.Args[0] 27890 if or.Op != OpAMD64ORQ { 27891 break 27892 } 27893 _ = or.Args[1] 27894 s1 := or.Args[0] 27895 if s1.Op != OpAMD64SHLQconst { 27896 break 27897 } 27898 j1 := s1.AuxInt 27899 x1 := s1.Args[0] 27900 if x1.Op != OpAMD64MOVBload { 27901 break 27902 } 27903 i1 := x1.AuxInt 27904 s := x1.Aux 27905 _ = 
x1.Args[1] 27906 p := x1.Args[0] 27907 mem := x1.Args[1] 27908 y := or.Args[1] 27909 s0 := v.Args[1] 27910 if s0.Op != OpAMD64SHLQconst { 27911 break 27912 } 27913 j0 := s0.AuxInt 27914 x0 := s0.Args[0] 27915 if x0.Op != OpAMD64MOVBload { 27916 break 27917 } 27918 i0 := x0.AuxInt 27919 if x0.Aux != s { 27920 break 27921 } 27922 _ = x0.Args[1] 27923 if p != x0.Args[0] { 27924 break 27925 } 27926 if mem != x0.Args[1] { 27927 break 27928 } 27929 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27930 break 27931 } 27932 b = mergePoint(b, x0, x1) 27933 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27934 v.reset(OpCopy) 27935 v.AddArg(v0) 27936 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27937 v1.AuxInt = j1 27938 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27939 v2.AuxInt = 8 27940 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27941 v3.AuxInt = i0 27942 v3.Aux = s 27943 v3.AddArg(p) 27944 v3.AddArg(mem) 27945 v2.AddArg(v3) 27946 v1.AddArg(v2) 27947 v0.AddArg(v1) 27948 v0.AddArg(y) 27949 return true 27950 } 27951 return false 27952 } 27953 func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { 27954 b := v.Block 27955 _ = b 27956 typ := &b.Func.Config.Types 27957 _ = typ 27958 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 27959 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27960 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27961 for { 27962 _ = v.Args[1] 27963 or := v.Args[0] 27964 if or.Op != OpAMD64ORQ { 27965 break 27966 } 27967 _ = or.Args[1] 27968 y := or.Args[0] 27969 s1 := or.Args[1] 27970 if s1.Op != OpAMD64SHLQconst { 27971 break 27972 } 27973 j1 := s1.AuxInt 27974 x1 := s1.Args[0] 27975 if x1.Op != OpAMD64MOVBload { 27976 break 27977 } 27978 i1 := x1.AuxInt 27979 s := x1.Aux 27980 _ = x1.Args[1] 27981 p := x1.Args[0] 27982 mem := x1.Args[1] 27983 s0 := v.Args[1] 27984 if s0.Op != OpAMD64SHLQconst { 27985 break 27986 } 27987 j0 := s0.AuxInt 27988 x0 := s0.Args[0] 27989 if x0.Op != OpAMD64MOVBload { 27990 break 27991 } 27992 i0 := x0.AuxInt 27993 if x0.Aux != s { 27994 break 27995 } 27996 _ = x0.Args[1] 27997 if p != x0.Args[0] { 27998 break 27999 } 28000 if mem != x0.Args[1] { 28001 break 28002 } 28003 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28004 break 28005 } 28006 b = mergePoint(b, x0, x1) 28007 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28008 v.reset(OpCopy) 28009 v.AddArg(v0) 28010 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28011 v1.AuxInt = j1 28012 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 28013 v2.AuxInt = 8 28014 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 28015 v3.AuxInt = i0 28016 v3.Aux = s 28017 v3.AddArg(p) 28018 v3.AddArg(mem) 28019 v2.AddArg(v3) 28020 v1.AddArg(v2) 28021 v0.AddArg(v1) 28022 v0.AddArg(y) 28023 return true 28024 } 28025 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] 
x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 28026 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 28027 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 28028 for { 28029 _ = v.Args[1] 28030 s0 := v.Args[0] 28031 if s0.Op != OpAMD64SHLQconst { 28032 break 28033 } 28034 j0 := s0.AuxInt 28035 r0 := s0.Args[0] 28036 if r0.Op != OpAMD64ROLWconst { 28037 break 28038 } 28039 if r0.AuxInt != 8 { 28040 break 28041 } 28042 x0 := r0.Args[0] 28043 if x0.Op != OpAMD64MOVWload { 28044 break 28045 } 28046 i0 := x0.AuxInt 28047 s := x0.Aux 28048 _ = x0.Args[1] 28049 p := x0.Args[0] 28050 mem := x0.Args[1] 28051 or := v.Args[1] 28052 if or.Op != OpAMD64ORQ { 28053 break 28054 } 28055 _ = or.Args[1] 28056 s1 := or.Args[0] 28057 if s1.Op != OpAMD64SHLQconst { 28058 break 28059 } 28060 j1 := s1.AuxInt 28061 r1 := s1.Args[0] 28062 if r1.Op != OpAMD64ROLWconst { 28063 break 28064 } 28065 if r1.AuxInt != 8 { 28066 break 28067 } 28068 x1 := r1.Args[0] 28069 if x1.Op != OpAMD64MOVWload { 28070 break 28071 } 28072 i1 := x1.AuxInt 28073 if x1.Aux != s { 28074 break 28075 } 28076 _ = x1.Args[1] 28077 if p != x1.Args[0] { 28078 break 28079 } 28080 if mem != x1.Args[1] { 28081 break 28082 } 28083 y := or.Args[1] 28084 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 28085 break 28086 } 28087 b = mergePoint(b, x0, x1) 28088 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28089 v.reset(OpCopy) 28090 v.AddArg(v0) 28091 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28092 v1.AuxInt = j1 28093 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 28094 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 28095 v3.AuxInt = i0 28096 v3.Aux = s 28097 v3.AddArg(p) 28098 v3.AddArg(mem) 28099 v2.AddArg(v3) 28100 v1.AddArg(v2) 28101 v0.AddArg(v1) 28102 v0.AddArg(y) 28103 return true 28104 } 28105 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 28106 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 28107 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 28108 for { 28109 _ = v.Args[1] 28110 s0 := v.Args[0] 28111 if s0.Op != OpAMD64SHLQconst { 28112 break 28113 } 28114 j0 := s0.AuxInt 28115 r0 := s0.Args[0] 28116 if r0.Op != OpAMD64ROLWconst { 28117 break 28118 } 28119 if r0.AuxInt != 8 { 28120 break 28121 } 28122 x0 := r0.Args[0] 28123 if x0.Op != OpAMD64MOVWload { 28124 break 28125 } 28126 i0 := x0.AuxInt 28127 s := x0.Aux 28128 _ = x0.Args[1] 28129 p := x0.Args[0] 28130 mem := x0.Args[1] 28131 or := v.Args[1] 28132 if or.Op != OpAMD64ORQ { 28133 break 28134 } 28135 _ = 
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
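// rewriteValueAMD64_OpAMD64ORQ_110 continues the ORQ rule list. The generator
// splits one op's rules across numbered helpers (_0, _10, _20, ...), roughly
// ten rules apiece, and the rewriteValueAMD64 dispatcher chains them with ||,
// so the helpers are tried in order until some rule fires. The indexed-load
// rules below treat p+idx as a plain sum, so p and idx may be swapped freely;
// for example (load8 being a hypothetical helper, not part of this package)
//
//	load8(p+idx+i0+1) | (load8(p+idx+i0) << 8)
//	  => rolw8(load16(p+idx+i0))
//
// i.e. a single 16-bit indexed load with its two bytes exchanged.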
func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
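	// The rules from here on find the pair inside a longer ORQ chain: the
	// second shifted load sits one level down, in or:(ORQ ... y), and the
	// rewrite re-emits (ORQ merged y) at the block returned by mergePoint so
	// the unrelated operand y survives. Sketch with hypothetical helpers,
	// requiring j1 == j0-8 and j1%16 == 0:
	//
	//	(load8(p+idx+i0) << j0) | ((load8(p+idx+i0+1) << j1) | y)
	//	  => (bswap16(load16(p+idx+i0)) << j1) | y
	//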
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
clobber(s0) && clobber(s1) && clobber(or)) { 30232 break 30233 } 30234 b = mergePoint(b, x0, x1) 30235 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30236 v.reset(OpCopy) 30237 v.AddArg(v0) 30238 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30239 v1.AuxInt = j1 30240 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30241 v2.AuxInt = 8 30242 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30243 v3.AuxInt = i0 30244 v3.Aux = s 30245 v3.AddArg(p) 30246 v3.AddArg(idx) 30247 v3.AddArg(mem) 30248 v2.AddArg(v3) 30249 v1.AddArg(v2) 30250 v0.AddArg(v1) 30251 v0.AddArg(y) 30252 return true 30253 } 30254 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 30255 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30256 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30257 for { 30258 _ = v.Args[1] 30259 s0 := v.Args[0] 30260 if s0.Op != OpAMD64SHLQconst { 30261 break 30262 } 30263 j0 := s0.AuxInt 30264 x0 := s0.Args[0] 30265 if x0.Op != OpAMD64MOVBloadidx1 { 30266 break 30267 } 30268 i0 := x0.AuxInt 30269 s := x0.Aux 30270 _ = x0.Args[2] 30271 idx := x0.Args[0] 30272 p := x0.Args[1] 30273 mem := x0.Args[2] 30274 or := v.Args[1] 30275 if or.Op != OpAMD64ORQ { 30276 break 30277 } 30278 _ = or.Args[1] 30279 y := or.Args[0] 30280 s1 := or.Args[1] 30281 if s1.Op != OpAMD64SHLQconst { 30282 break 30283 } 30284 j1 := s1.AuxInt 30285 x1 := s1.Args[0] 30286 if x1.Op != OpAMD64MOVBloadidx1 { 30287 break 30288 } 30289 i1 := x1.AuxInt 30290 if x1.Aux != s { 30291 break 30292 } 30293 _ = x1.Args[2] 30294 if p != x1.Args[0] { 30295 break 30296 } 30297 if idx != x1.Args[1] { 30298 break 30299 } 30300 if mem != x1.Args[2] { 30301 break 30302 } 30303 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30304 break 30305 } 30306 b = mergePoint(b, x0, x1) 30307 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30308 v.reset(OpCopy) 30309 v.AddArg(v0) 30310 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30311 v1.AuxInt = j1 30312 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30313 v2.AuxInt = 8 30314 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30315 v3.AuxInt = i0 30316 v3.Aux = s 30317 v3.AddArg(p) 30318 v3.AddArg(idx) 30319 v3.AddArg(mem) 30320 v2.AddArg(v3) 30321 v1.AddArg(v2) 30322 v0.AddArg(v1) 30323 v0.AddArg(y) 30324 return true 30325 } 30326 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 30327 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30328 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30329 for { 30330 _ = v.Args[1] 30331 s0 := v.Args[0] 30332 if s0.Op != OpAMD64SHLQconst { 30333 break 30334 } 30335 j0 := s0.AuxInt 30336 x0 := s0.Args[0] 30337 if x0.Op != 
OpAMD64MOVBloadidx1 { 30338 break 30339 } 30340 i0 := x0.AuxInt 30341 s := x0.Aux 30342 _ = x0.Args[2] 30343 p := x0.Args[0] 30344 idx := x0.Args[1] 30345 mem := x0.Args[2] 30346 or := v.Args[1] 30347 if or.Op != OpAMD64ORQ { 30348 break 30349 } 30350 _ = or.Args[1] 30351 y := or.Args[0] 30352 s1 := or.Args[1] 30353 if s1.Op != OpAMD64SHLQconst { 30354 break 30355 } 30356 j1 := s1.AuxInt 30357 x1 := s1.Args[0] 30358 if x1.Op != OpAMD64MOVBloadidx1 { 30359 break 30360 } 30361 i1 := x1.AuxInt 30362 if x1.Aux != s { 30363 break 30364 } 30365 _ = x1.Args[2] 30366 if idx != x1.Args[0] { 30367 break 30368 } 30369 if p != x1.Args[1] { 30370 break 30371 } 30372 if mem != x1.Args[2] { 30373 break 30374 } 30375 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30376 break 30377 } 30378 b = mergePoint(b, x0, x1) 30379 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30380 v.reset(OpCopy) 30381 v.AddArg(v0) 30382 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30383 v1.AuxInt = j1 30384 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30385 v2.AuxInt = 8 30386 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30387 v3.AuxInt = i0 30388 v3.Aux = s 30389 v3.AddArg(p) 30390 v3.AddArg(idx) 30391 v3.AddArg(mem) 30392 v2.AddArg(v3) 30393 v1.AddArg(v2) 30394 v0.AddArg(v1) 30395 v0.AddArg(y) 30396 return true 30397 } 30398 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 30399 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30400 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30401 for { 30402 _ = v.Args[1] 30403 s0 := v.Args[0] 30404 if s0.Op != OpAMD64SHLQconst { 30405 break 30406 } 30407 j0 := s0.AuxInt 30408 x0 := s0.Args[0] 30409 if x0.Op != OpAMD64MOVBloadidx1 { 30410 break 30411 } 30412 i0 := x0.AuxInt 30413 s := x0.Aux 30414 _ = x0.Args[2] 30415 idx := x0.Args[0] 30416 p := x0.Args[1] 30417 mem := x0.Args[2] 30418 or := v.Args[1] 30419 if or.Op != OpAMD64ORQ { 30420 break 30421 } 30422 _ = or.Args[1] 30423 y := or.Args[0] 30424 s1 := or.Args[1] 30425 if s1.Op != OpAMD64SHLQconst { 30426 break 30427 } 30428 j1 := s1.AuxInt 30429 x1 := s1.Args[0] 30430 if x1.Op != OpAMD64MOVBloadidx1 { 30431 break 30432 } 30433 i1 := x1.AuxInt 30434 if x1.Aux != s { 30435 break 30436 } 30437 _ = x1.Args[2] 30438 if idx != x1.Args[0] { 30439 break 30440 } 30441 if p != x1.Args[1] { 30442 break 30443 } 30444 if mem != x1.Args[2] { 30445 break 30446 } 30447 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30448 break 30449 } 30450 b = mergePoint(b, x0, x1) 30451 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30452 v.reset(OpCopy) 30453 v.AddArg(v0) 30454 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30455 v1.AuxInt = j1 30456 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30457 v2.AuxInt = 8 30458 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30459 v3.AuxInt = i0 
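// Note: v0..v3 rebuild the rule's result tree from the root down:
// v3 is the merged 16-bit load at the lower offset i0, v2 (ROLWconst [8])
// swaps its two bytes so the value keeps the original big-endian byte
// order, and v1 (SHLQconst [j1]) re-applies the shift shared by both
// halves before v0 ORs the result with y.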
30460 v3.Aux = s 30461 v3.AddArg(p) 30462 v3.AddArg(idx) 30463 v3.AddArg(mem) 30464 v2.AddArg(v3) 30465 v1.AddArg(v2) 30466 v0.AddArg(v1) 30467 v0.AddArg(y) 30468 return true 30469 } 30470 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30471 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30472 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30473 for { 30474 _ = v.Args[1] 30475 or := v.Args[0] 30476 if or.Op != OpAMD64ORQ { 30477 break 30478 } 30479 _ = or.Args[1] 30480 s1 := or.Args[0] 30481 if s1.Op != OpAMD64SHLQconst { 30482 break 30483 } 30484 j1 := s1.AuxInt 30485 x1 := s1.Args[0] 30486 if x1.Op != OpAMD64MOVBloadidx1 { 30487 break 30488 } 30489 i1 := x1.AuxInt 30490 s := x1.Aux 30491 _ = x1.Args[2] 30492 p := x1.Args[0] 30493 idx := x1.Args[1] 30494 mem := x1.Args[2] 30495 y := or.Args[1] 30496 s0 := v.Args[1] 30497 if s0.Op != OpAMD64SHLQconst { 30498 break 30499 } 30500 j0 := s0.AuxInt 30501 x0 := s0.Args[0] 30502 if x0.Op != OpAMD64MOVBloadidx1 { 30503 break 30504 } 30505 i0 := x0.AuxInt 30506 if x0.Aux != s { 30507 break 30508 } 30509 _ = x0.Args[2] 30510 if p != x0.Args[0] { 30511 break 30512 } 30513 if idx != x0.Args[1] { 30514 break 30515 } 30516 if mem != x0.Args[2] { 30517 break 30518 } 30519 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30520 break 30521 } 30522 b = mergePoint(b, x0, x1) 30523 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30524 v.reset(OpCopy) 30525 v.AddArg(v0) 30526 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30527 v1.AuxInt = j1 30528 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30529 v2.AuxInt = 8 30530 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30531 v3.AuxInt = i0 30532 v3.Aux = s 30533 v3.AddArg(p) 30534 v3.AddArg(idx) 30535 v3.AddArg(mem) 30536 v2.AddArg(v3) 30537 v1.AddArg(v2) 30538 v0.AddArg(v1) 30539 v0.AddArg(y) 30540 return true 30541 } 30542 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30543 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30544 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30545 for { 30546 _ = v.Args[1] 30547 or := v.Args[0] 30548 if or.Op != OpAMD64ORQ { 30549 break 30550 } 30551 _ = or.Args[1] 30552 s1 := or.Args[0] 30553 if s1.Op != OpAMD64SHLQconst { 30554 break 30555 } 30556 j1 := s1.AuxInt 30557 x1 := s1.Args[0] 30558 if x1.Op != OpAMD64MOVBloadidx1 { 30559 break 30560 } 30561 i1 := x1.AuxInt 30562 s := x1.Aux 30563 _ = x1.Args[2] 30564 idx := x1.Args[0] 30565 p := x1.Args[1] 30566 mem := x1.Args[2] 30567 y := or.Args[1] 30568 s0 := v.Args[1] 30569 if s0.Op != OpAMD64SHLQconst { 30570 break 30571 } 30572 j0 := s0.AuxInt 30573 x0 := s0.Args[0] 30574 if x0.Op != 
OpAMD64MOVBloadidx1 { 30575 break 30576 } 30577 i0 := x0.AuxInt 30578 if x0.Aux != s { 30579 break 30580 } 30581 _ = x0.Args[2] 30582 if p != x0.Args[0] { 30583 break 30584 } 30585 if idx != x0.Args[1] { 30586 break 30587 } 30588 if mem != x0.Args[2] { 30589 break 30590 } 30591 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30592 break 30593 } 30594 b = mergePoint(b, x0, x1) 30595 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30596 v.reset(OpCopy) 30597 v.AddArg(v0) 30598 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30599 v1.AuxInt = j1 30600 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30601 v2.AuxInt = 8 30602 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30603 v3.AuxInt = i0 30604 v3.Aux = s 30605 v3.AddArg(p) 30606 v3.AddArg(idx) 30607 v3.AddArg(mem) 30608 v2.AddArg(v3) 30609 v1.AddArg(v2) 30610 v0.AddArg(v1) 30611 v0.AddArg(y) 30612 return true 30613 } 30614 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30615 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30616 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30617 for { 30618 _ = v.Args[1] 30619 or := v.Args[0] 30620 if or.Op != OpAMD64ORQ { 30621 break 30622 } 30623 _ = or.Args[1] 30624 y := or.Args[0] 30625 s1 := or.Args[1] 30626 if s1.Op != OpAMD64SHLQconst { 30627 break 30628 } 30629 j1 := s1.AuxInt 30630 x1 := s1.Args[0] 30631 if x1.Op != OpAMD64MOVBloadidx1 { 30632 break 30633 } 30634 i1 := x1.AuxInt 30635 s := x1.Aux 30636 _ = x1.Args[2] 30637 p := x1.Args[0] 30638 idx := x1.Args[1] 30639 mem := x1.Args[2] 30640 s0 := v.Args[1] 30641 if s0.Op != OpAMD64SHLQconst { 30642 break 30643 } 30644 j0 := s0.AuxInt 30645 x0 := s0.Args[0] 30646 if x0.Op != OpAMD64MOVBloadidx1 { 30647 break 30648 } 30649 i0 := x0.AuxInt 30650 if x0.Aux != s { 30651 break 30652 } 30653 _ = x0.Args[2] 30654 if p != x0.Args[0] { 30655 break 30656 } 30657 if idx != x0.Args[1] { 30658 break 30659 } 30660 if mem != x0.Args[2] { 30661 break 30662 } 30663 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30664 break 30665 } 30666 b = mergePoint(b, x0, x1) 30667 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30668 v.reset(OpCopy) 30669 v.AddArg(v0) 30670 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30671 v1.AuxInt = j1 30672 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30673 v2.AuxInt = 8 30674 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30675 v3.AuxInt = i0 30676 v3.Aux = s 30677 v3.AddArg(p) 30678 v3.AddArg(idx) 30679 v3.AddArg(mem) 30680 v2.AddArg(v3) 30681 v1.AddArg(v2) 30682 v0.AddArg(v1) 30683 v0.AddArg(y) 30684 return true 30685 } 30686 return false 30687 } 30688 func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool { 30689 b := v.Block 30690 _ = b 30691 typ := &b.Func.Config.Types 30692 _ = typ 30693 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p 
mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30694 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30695 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30696 for { 30697 _ = v.Args[1] 30698 or := v.Args[0] 30699 if or.Op != OpAMD64ORQ { 30700 break 30701 } 30702 _ = or.Args[1] 30703 y := or.Args[0] 30704 s1 := or.Args[1] 30705 if s1.Op != OpAMD64SHLQconst { 30706 break 30707 } 30708 j1 := s1.AuxInt 30709 x1 := s1.Args[0] 30710 if x1.Op != OpAMD64MOVBloadidx1 { 30711 break 30712 } 30713 i1 := x1.AuxInt 30714 s := x1.Aux 30715 _ = x1.Args[2] 30716 idx := x1.Args[0] 30717 p := x1.Args[1] 30718 mem := x1.Args[2] 30719 s0 := v.Args[1] 30720 if s0.Op != OpAMD64SHLQconst { 30721 break 30722 } 30723 j0 := s0.AuxInt 30724 x0 := s0.Args[0] 30725 if x0.Op != OpAMD64MOVBloadidx1 { 30726 break 30727 } 30728 i0 := x0.AuxInt 30729 if x0.Aux != s { 30730 break 30731 } 30732 _ = x0.Args[2] 30733 if p != x0.Args[0] { 30734 break 30735 } 30736 if idx != x0.Args[1] { 30737 break 30738 } 30739 if mem != x0.Args[2] { 30740 break 30741 } 30742 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30743 break 30744 } 30745 b = mergePoint(b, x0, x1) 30746 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30747 v.reset(OpCopy) 30748 v.AddArg(v0) 30749 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30750 v1.AuxInt = j1 30751 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30752 v2.AuxInt = 8 30753 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30754 v3.AuxInt = i0 30755 v3.Aux = s 30756 v3.AddArg(p) 30757 v3.AddArg(idx) 30758 v3.AddArg(mem) 30759 v2.AddArg(v3) 30760 v1.AddArg(v2) 30761 v0.AddArg(v1) 30762 v0.AddArg(y) 30763 return true 30764 } 30765 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30766 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30767 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30768 for { 30769 _ = v.Args[1] 30770 or := v.Args[0] 30771 if or.Op != OpAMD64ORQ { 30772 break 30773 } 30774 _ = or.Args[1] 30775 s1 := or.Args[0] 30776 if s1.Op != OpAMD64SHLQconst { 30777 break 30778 } 30779 j1 := s1.AuxInt 30780 x1 := s1.Args[0] 30781 if x1.Op != OpAMD64MOVBloadidx1 { 30782 break 30783 } 30784 i1 := x1.AuxInt 30785 s := x1.Aux 30786 _ = x1.Args[2] 30787 p := x1.Args[0] 30788 idx := x1.Args[1] 30789 mem := x1.Args[2] 30790 y := or.Args[1] 30791 s0 := v.Args[1] 30792 if s0.Op != OpAMD64SHLQconst { 30793 break 30794 } 30795 j0 := s0.AuxInt 30796 x0 := s0.Args[0] 30797 if x0.Op != OpAMD64MOVBloadidx1 { 30798 break 30799 } 30800 i0 := x0.AuxInt 30801 if x0.Aux != s { 30802 break 30803 } 30804 _ = x0.Args[2] 30805 if idx != x0.Args[0] { 30806 break 30807 } 30808 if p != x0.Args[1] { 30809 break 30810 } 30811 if mem != x0.Args[2] { 30812 break 30813 } 
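// Note: the check below is the rule's cond. It requires the two byte
// loads to be adjacent (i1 == i0+1), the shift amounts to differ by
// exactly 8 with the smaller one 16-bit aligned (j1 == j0-8, j1%16 == 0),
// and every matched intermediate value to have a single use, so that
// clobber(...) can safely mark it for removal once the merged load
// replaces it. mergePoint(b, x0, x1) selects a block in which the
// combined value may be placed; nil means no safe block exists and the
// rule does not fire.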
30814 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30815 break 30816 } 30817 b = mergePoint(b, x0, x1) 30818 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30819 v.reset(OpCopy) 30820 v.AddArg(v0) 30821 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30822 v1.AuxInt = j1 30823 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30824 v2.AuxInt = 8 30825 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30826 v3.AuxInt = i0 30827 v3.Aux = s 30828 v3.AddArg(p) 30829 v3.AddArg(idx) 30830 v3.AddArg(mem) 30831 v2.AddArg(v3) 30832 v1.AddArg(v2) 30833 v0.AddArg(v1) 30834 v0.AddArg(y) 30835 return true 30836 } 30837 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30838 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30839 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30840 for { 30841 _ = v.Args[1] 30842 or := v.Args[0] 30843 if or.Op != OpAMD64ORQ { 30844 break 30845 } 30846 _ = or.Args[1] 30847 s1 := or.Args[0] 30848 if s1.Op != OpAMD64SHLQconst { 30849 break 30850 } 30851 j1 := s1.AuxInt 30852 x1 := s1.Args[0] 30853 if x1.Op != OpAMD64MOVBloadidx1 { 30854 break 30855 } 30856 i1 := x1.AuxInt 30857 s := x1.Aux 30858 _ = x1.Args[2] 30859 idx := x1.Args[0] 30860 p := x1.Args[1] 30861 mem := x1.Args[2] 30862 y := or.Args[1] 30863 s0 := v.Args[1] 30864 if s0.Op != OpAMD64SHLQconst { 30865 break 30866 } 30867 j0 := s0.AuxInt 30868 x0 := s0.Args[0] 30869 if x0.Op != OpAMD64MOVBloadidx1 { 30870 break 30871 } 30872 i0 := x0.AuxInt 30873 if x0.Aux != s { 30874 break 30875 } 30876 _ = x0.Args[2] 30877 if idx != x0.Args[0] { 30878 break 30879 } 30880 if p != x0.Args[1] { 30881 break 30882 } 30883 if mem != x0.Args[2] { 30884 break 30885 } 30886 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30887 break 30888 } 30889 b = mergePoint(b, x0, x1) 30890 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30891 v.reset(OpCopy) 30892 v.AddArg(v0) 30893 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30894 v1.AuxInt = j1 30895 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30896 v2.AuxInt = 8 30897 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30898 v3.AuxInt = i0 30899 v3.Aux = s 30900 v3.AddArg(p) 30901 v3.AddArg(idx) 30902 v3.AddArg(mem) 30903 v2.AddArg(v3) 30904 v1.AddArg(v2) 30905 v0.AddArg(v1) 30906 v0.AddArg(y) 30907 return true 30908 } 30909 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30910 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30911 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx 
mem))) y) 30912 for { 30913 _ = v.Args[1] 30914 or := v.Args[0] 30915 if or.Op != OpAMD64ORQ { 30916 break 30917 } 30918 _ = or.Args[1] 30919 y := or.Args[0] 30920 s1 := or.Args[1] 30921 if s1.Op != OpAMD64SHLQconst { 30922 break 30923 } 30924 j1 := s1.AuxInt 30925 x1 := s1.Args[0] 30926 if x1.Op != OpAMD64MOVBloadidx1 { 30927 break 30928 } 30929 i1 := x1.AuxInt 30930 s := x1.Aux 30931 _ = x1.Args[2] 30932 p := x1.Args[0] 30933 idx := x1.Args[1] 30934 mem := x1.Args[2] 30935 s0 := v.Args[1] 30936 if s0.Op != OpAMD64SHLQconst { 30937 break 30938 } 30939 j0 := s0.AuxInt 30940 x0 := s0.Args[0] 30941 if x0.Op != OpAMD64MOVBloadidx1 { 30942 break 30943 } 30944 i0 := x0.AuxInt 30945 if x0.Aux != s { 30946 break 30947 } 30948 _ = x0.Args[2] 30949 if idx != x0.Args[0] { 30950 break 30951 } 30952 if p != x0.Args[1] { 30953 break 30954 } 30955 if mem != x0.Args[2] { 30956 break 30957 } 30958 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30959 break 30960 } 30961 b = mergePoint(b, x0, x1) 30962 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30963 v.reset(OpCopy) 30964 v.AddArg(v0) 30965 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30966 v1.AuxInt = j1 30967 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30968 v2.AuxInt = 8 30969 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30970 v3.AuxInt = i0 30971 v3.Aux = s 30972 v3.AddArg(p) 30973 v3.AddArg(idx) 30974 v3.AddArg(mem) 30975 v2.AddArg(v3) 30976 v1.AddArg(v2) 30977 v0.AddArg(v1) 30978 v0.AddArg(y) 30979 return true 30980 } 30981 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30982 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30983 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30984 for { 30985 _ = v.Args[1] 30986 or := v.Args[0] 30987 if or.Op != OpAMD64ORQ { 30988 break 30989 } 30990 _ = or.Args[1] 30991 y := or.Args[0] 30992 s1 := or.Args[1] 30993 if s1.Op != OpAMD64SHLQconst { 30994 break 30995 } 30996 j1 := s1.AuxInt 30997 x1 := s1.Args[0] 30998 if x1.Op != OpAMD64MOVBloadidx1 { 30999 break 31000 } 31001 i1 := x1.AuxInt 31002 s := x1.Aux 31003 _ = x1.Args[2] 31004 idx := x1.Args[0] 31005 p := x1.Args[1] 31006 mem := x1.Args[2] 31007 s0 := v.Args[1] 31008 if s0.Op != OpAMD64SHLQconst { 31009 break 31010 } 31011 j0 := s0.AuxInt 31012 x0 := s0.Args[0] 31013 if x0.Op != OpAMD64MOVBloadidx1 { 31014 break 31015 } 31016 i0 := x0.AuxInt 31017 if x0.Aux != s { 31018 break 31019 } 31020 _ = x0.Args[2] 31021 if idx != x0.Args[0] { 31022 break 31023 } 31024 if p != x0.Args[1] { 31025 break 31026 } 31027 if mem != x0.Args[2] { 31028 break 31029 } 31030 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31031 break 31032 } 31033 b = mergePoint(b, x0, x1) 31034 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31035 v.reset(OpCopy) 31036 v.AddArg(v0) 31037 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 
31038 v1.AuxInt = j1 31039 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 31040 v2.AuxInt = 8 31041 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31042 v3.AuxInt = i0 31043 v3.Aux = s 31044 v3.AddArg(p) 31045 v3.AddArg(idx) 31046 v3.AddArg(mem) 31047 v2.AddArg(v3) 31048 v1.AddArg(v2) 31049 v0.AddArg(v1) 31050 v0.AddArg(y) 31051 return true 31052 } 31053 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 31054 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31055 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31056 for { 31057 _ = v.Args[1] 31058 s0 := v.Args[0] 31059 if s0.Op != OpAMD64SHLQconst { 31060 break 31061 } 31062 j0 := s0.AuxInt 31063 r0 := s0.Args[0] 31064 if r0.Op != OpAMD64ROLWconst { 31065 break 31066 } 31067 if r0.AuxInt != 8 { 31068 break 31069 } 31070 x0 := r0.Args[0] 31071 if x0.Op != OpAMD64MOVWloadidx1 { 31072 break 31073 } 31074 i0 := x0.AuxInt 31075 s := x0.Aux 31076 _ = x0.Args[2] 31077 p := x0.Args[0] 31078 idx := x0.Args[1] 31079 mem := x0.Args[2] 31080 or := v.Args[1] 31081 if or.Op != OpAMD64ORQ { 31082 break 31083 } 31084 _ = or.Args[1] 31085 s1 := or.Args[0] 31086 if s1.Op != OpAMD64SHLQconst { 31087 break 31088 } 31089 j1 := s1.AuxInt 31090 r1 := s1.Args[0] 31091 if r1.Op != OpAMD64ROLWconst { 31092 break 31093 } 31094 if r1.AuxInt != 8 { 31095 break 31096 } 31097 x1 := r1.Args[0] 31098 if x1.Op != OpAMD64MOVWloadidx1 { 31099 break 31100 } 31101 i1 := x1.AuxInt 31102 if x1.Aux != s { 31103 break 31104 } 31105 _ = x1.Args[2] 31106 if p != x1.Args[0] { 31107 break 31108 } 31109 if idx != x1.Args[1] { 31110 break 31111 } 31112 if mem != x1.Args[2] { 31113 break 31114 } 31115 y := or.Args[1] 31116 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31117 break 31118 } 31119 b = mergePoint(b, x0, x1) 31120 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31121 v.reset(OpCopy) 31122 v.AddArg(v0) 31123 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31124 v1.AuxInt = j1 31125 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31126 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31127 v3.AuxInt = i0 31128 v3.Aux = s 31129 v3.AddArg(p) 31130 v3.AddArg(idx) 31131 v3.AddArg(mem) 31132 v2.AddArg(v3) 31133 v1.AddArg(v2) 31134 v0.AddArg(v1) 31135 v0.AddArg(y) 31136 return true 31137 } 31138 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 31139 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31140 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL 
<typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31141 for { 31142 _ = v.Args[1] 31143 s0 := v.Args[0] 31144 if s0.Op != OpAMD64SHLQconst { 31145 break 31146 } 31147 j0 := s0.AuxInt 31148 r0 := s0.Args[0] 31149 if r0.Op != OpAMD64ROLWconst { 31150 break 31151 } 31152 if r0.AuxInt != 8 { 31153 break 31154 } 31155 x0 := r0.Args[0] 31156 if x0.Op != OpAMD64MOVWloadidx1 { 31157 break 31158 } 31159 i0 := x0.AuxInt 31160 s := x0.Aux 31161 _ = x0.Args[2] 31162 idx := x0.Args[0] 31163 p := x0.Args[1] 31164 mem := x0.Args[2] 31165 or := v.Args[1] 31166 if or.Op != OpAMD64ORQ { 31167 break 31168 } 31169 _ = or.Args[1] 31170 s1 := or.Args[0] 31171 if s1.Op != OpAMD64SHLQconst { 31172 break 31173 } 31174 j1 := s1.AuxInt 31175 r1 := s1.Args[0] 31176 if r1.Op != OpAMD64ROLWconst { 31177 break 31178 } 31179 if r1.AuxInt != 8 { 31180 break 31181 } 31182 x1 := r1.Args[0] 31183 if x1.Op != OpAMD64MOVWloadidx1 { 31184 break 31185 } 31186 i1 := x1.AuxInt 31187 if x1.Aux != s { 31188 break 31189 } 31190 _ = x1.Args[2] 31191 if p != x1.Args[0] { 31192 break 31193 } 31194 if idx != x1.Args[1] { 31195 break 31196 } 31197 if mem != x1.Args[2] { 31198 break 31199 } 31200 y := or.Args[1] 31201 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31202 break 31203 } 31204 b = mergePoint(b, x0, x1) 31205 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31206 v.reset(OpCopy) 31207 v.AddArg(v0) 31208 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31209 v1.AuxInt = j1 31210 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31211 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31212 v3.AuxInt = i0 31213 v3.Aux = s 31214 v3.AddArg(p) 31215 v3.AddArg(idx) 31216 v3.AddArg(mem) 31217 v2.AddArg(v3) 31218 v1.AddArg(v2) 31219 v0.AddArg(v1) 31220 v0.AddArg(y) 31221 return true 31222 } 31223 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 31224 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31225 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31226 for { 31227 _ = v.Args[1] 31228 s0 := v.Args[0] 31229 if s0.Op != OpAMD64SHLQconst { 31230 break 31231 } 31232 j0 := s0.AuxInt 31233 r0 := s0.Args[0] 31234 if r0.Op != OpAMD64ROLWconst { 31235 break 31236 } 31237 if r0.AuxInt != 8 { 31238 break 31239 } 31240 x0 := r0.Args[0] 31241 if x0.Op != OpAMD64MOVWloadidx1 { 31242 break 31243 } 31244 i0 := x0.AuxInt 31245 s := x0.Aux 31246 _ = x0.Args[2] 31247 p := x0.Args[0] 31248 idx := x0.Args[1] 31249 mem := x0.Args[2] 31250 or := v.Args[1] 31251 if or.Op != OpAMD64ORQ { 31252 break 31253 } 31254 _ = or.Args[1] 31255 s1 := or.Args[0] 31256 if s1.Op != OpAMD64SHLQconst { 31257 break 31258 } 31259 j1 := s1.AuxInt 31260 r1 := s1.Args[0] 31261 if r1.Op != OpAMD64ROLWconst { 31262 break 31263 } 31264 if r1.AuxInt != 8 { 31265 break 31266 } 31267 x1 := r1.Args[0] 31268 if x1.Op != OpAMD64MOVWloadidx1 { 31269 break 31270 } 31271 i1 := 
x1.AuxInt 31272 if x1.Aux != s { 31273 break 31274 } 31275 _ = x1.Args[2] 31276 if idx != x1.Args[0] { 31277 break 31278 } 31279 if p != x1.Args[1] { 31280 break 31281 } 31282 if mem != x1.Args[2] { 31283 break 31284 } 31285 y := or.Args[1] 31286 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31287 break 31288 } 31289 b = mergePoint(b, x0, x1) 31290 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31291 v.reset(OpCopy) 31292 v.AddArg(v0) 31293 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31294 v1.AuxInt = j1 31295 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31296 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31297 v3.AuxInt = i0 31298 v3.Aux = s 31299 v3.AddArg(p) 31300 v3.AddArg(idx) 31301 v3.AddArg(mem) 31302 v2.AddArg(v3) 31303 v1.AddArg(v2) 31304 v0.AddArg(v1) 31305 v0.AddArg(y) 31306 return true 31307 } 31308 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 31309 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31310 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31311 for { 31312 _ = v.Args[1] 31313 s0 := v.Args[0] 31314 if s0.Op != OpAMD64SHLQconst { 31315 break 31316 } 31317 j0 := s0.AuxInt 31318 r0 := s0.Args[0] 31319 if r0.Op != OpAMD64ROLWconst { 31320 break 31321 } 31322 if r0.AuxInt != 8 { 31323 break 31324 } 31325 x0 := r0.Args[0] 31326 if x0.Op != OpAMD64MOVWloadidx1 { 31327 break 31328 } 31329 i0 := x0.AuxInt 31330 s := x0.Aux 31331 _ = x0.Args[2] 31332 idx := x0.Args[0] 31333 p := x0.Args[1] 31334 mem := x0.Args[2] 31335 or := v.Args[1] 31336 if or.Op != OpAMD64ORQ { 31337 break 31338 } 31339 _ = or.Args[1] 31340 s1 := or.Args[0] 31341 if s1.Op != OpAMD64SHLQconst { 31342 break 31343 } 31344 j1 := s1.AuxInt 31345 r1 := s1.Args[0] 31346 if r1.Op != OpAMD64ROLWconst { 31347 break 31348 } 31349 if r1.AuxInt != 8 { 31350 break 31351 } 31352 x1 := r1.Args[0] 31353 if x1.Op != OpAMD64MOVWloadidx1 { 31354 break 31355 } 31356 i1 := x1.AuxInt 31357 if x1.Aux != s { 31358 break 31359 } 31360 _ = x1.Args[2] 31361 if idx != x1.Args[0] { 31362 break 31363 } 31364 if p != x1.Args[1] { 31365 break 31366 } 31367 if mem != x1.Args[2] { 31368 break 31369 } 31370 y := or.Args[1] 31371 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31372 break 31373 } 31374 b = mergePoint(b, x0, x1) 31375 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31376 v.reset(OpCopy) 31377 v.AddArg(v0) 31378 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31379 v1.AuxInt = j1 31380 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31381 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31382 v3.AuxInt = i0 31383 v3.Aux = s 31384 v3.AddArg(p) 
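// Note: although this case matched the loads with idx and p swapped,
// the replacement emits the (p, idx, mem) order; for scale-1 indexed
// loads the pointer and index operands are interchangeable.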
31385 v3.AddArg(idx) 31386 v3.AddArg(mem) 31387 v2.AddArg(v3) 31388 v1.AddArg(v2) 31389 v0.AddArg(v1) 31390 v0.AddArg(y) 31391 return true 31392 } 31393 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 31394 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31395 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31396 for { 31397 _ = v.Args[1] 31398 s0 := v.Args[0] 31399 if s0.Op != OpAMD64SHLQconst { 31400 break 31401 } 31402 j0 := s0.AuxInt 31403 r0 := s0.Args[0] 31404 if r0.Op != OpAMD64ROLWconst { 31405 break 31406 } 31407 if r0.AuxInt != 8 { 31408 break 31409 } 31410 x0 := r0.Args[0] 31411 if x0.Op != OpAMD64MOVWloadidx1 { 31412 break 31413 } 31414 i0 := x0.AuxInt 31415 s := x0.Aux 31416 _ = x0.Args[2] 31417 p := x0.Args[0] 31418 idx := x0.Args[1] 31419 mem := x0.Args[2] 31420 or := v.Args[1] 31421 if or.Op != OpAMD64ORQ { 31422 break 31423 } 31424 _ = or.Args[1] 31425 y := or.Args[0] 31426 s1 := or.Args[1] 31427 if s1.Op != OpAMD64SHLQconst { 31428 break 31429 } 31430 j1 := s1.AuxInt 31431 r1 := s1.Args[0] 31432 if r1.Op != OpAMD64ROLWconst { 31433 break 31434 } 31435 if r1.AuxInt != 8 { 31436 break 31437 } 31438 x1 := r1.Args[0] 31439 if x1.Op != OpAMD64MOVWloadidx1 { 31440 break 31441 } 31442 i1 := x1.AuxInt 31443 if x1.Aux != s { 31444 break 31445 } 31446 _ = x1.Args[2] 31447 if p != x1.Args[0] { 31448 break 31449 } 31450 if idx != x1.Args[1] { 31451 break 31452 } 31453 if mem != x1.Args[2] { 31454 break 31455 } 31456 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31457 break 31458 } 31459 b = mergePoint(b, x0, x1) 31460 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31461 v.reset(OpCopy) 31462 v.AddArg(v0) 31463 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31464 v1.AuxInt = j1 31465 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31466 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31467 v3.AuxInt = i0 31468 v3.Aux = s 31469 v3.AddArg(p) 31470 v3.AddArg(idx) 31471 v3.AddArg(mem) 31472 v2.AddArg(v3) 31473 v1.AddArg(v2) 31474 v0.AddArg(v1) 31475 v0.AddArg(y) 31476 return true 31477 } 31478 return false 31479 } 31480 func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 31481 b := v.Block 31482 _ = b 31483 typ := &b.Func.Config.Types 31484 _ = typ 31485 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 31486 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31487 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31488 
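// Note: this and the surrounding cases merge two byte-swapped 16-bit
// loads (ROLWconst [8] over MOVWloadidx1) at offsets i0 and i0+2 into a
// single 32-bit load followed by BSWAPL, with j1 == j0-16 and j1%32 == 0
// keeping the pair inside one aligned 32-bit lane of the 64-bit result.
// Such shapes are typically what remains after the earlier byte-load
// rules have run, e.g. for a big-endian read in the style of
// encoding/binary (an illustration of the likely origin, not anything
// taken from this file):
//
//	v := uint64(p[i])<<56 | uint64(p[i+1])<<48 | /* ... */ | uint64(p[i+7])
//
// which successive rounds of these rules shrink toward a single wide
// load plus a byte swap.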
for { 31489 _ = v.Args[1] 31490 s0 := v.Args[0] 31491 if s0.Op != OpAMD64SHLQconst { 31492 break 31493 } 31494 j0 := s0.AuxInt 31495 r0 := s0.Args[0] 31496 if r0.Op != OpAMD64ROLWconst { 31497 break 31498 } 31499 if r0.AuxInt != 8 { 31500 break 31501 } 31502 x0 := r0.Args[0] 31503 if x0.Op != OpAMD64MOVWloadidx1 { 31504 break 31505 } 31506 i0 := x0.AuxInt 31507 s := x0.Aux 31508 _ = x0.Args[2] 31509 idx := x0.Args[0] 31510 p := x0.Args[1] 31511 mem := x0.Args[2] 31512 or := v.Args[1] 31513 if or.Op != OpAMD64ORQ { 31514 break 31515 } 31516 _ = or.Args[1] 31517 y := or.Args[0] 31518 s1 := or.Args[1] 31519 if s1.Op != OpAMD64SHLQconst { 31520 break 31521 } 31522 j1 := s1.AuxInt 31523 r1 := s1.Args[0] 31524 if r1.Op != OpAMD64ROLWconst { 31525 break 31526 } 31527 if r1.AuxInt != 8 { 31528 break 31529 } 31530 x1 := r1.Args[0] 31531 if x1.Op != OpAMD64MOVWloadidx1 { 31532 break 31533 } 31534 i1 := x1.AuxInt 31535 if x1.Aux != s { 31536 break 31537 } 31538 _ = x1.Args[2] 31539 if p != x1.Args[0] { 31540 break 31541 } 31542 if idx != x1.Args[1] { 31543 break 31544 } 31545 if mem != x1.Args[2] { 31546 break 31547 } 31548 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31549 break 31550 } 31551 b = mergePoint(b, x0, x1) 31552 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31553 v.reset(OpCopy) 31554 v.AddArg(v0) 31555 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31556 v1.AuxInt = j1 31557 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31558 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31559 v3.AuxInt = i0 31560 v3.Aux = s 31561 v3.AddArg(p) 31562 v3.AddArg(idx) 31563 v3.AddArg(mem) 31564 v2.AddArg(v3) 31565 v1.AddArg(v2) 31566 v0.AddArg(v1) 31567 v0.AddArg(y) 31568 return true 31569 } 31570 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 31571 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31572 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31573 for { 31574 _ = v.Args[1] 31575 s0 := v.Args[0] 31576 if s0.Op != OpAMD64SHLQconst { 31577 break 31578 } 31579 j0 := s0.AuxInt 31580 r0 := s0.Args[0] 31581 if r0.Op != OpAMD64ROLWconst { 31582 break 31583 } 31584 if r0.AuxInt != 8 { 31585 break 31586 } 31587 x0 := r0.Args[0] 31588 if x0.Op != OpAMD64MOVWloadidx1 { 31589 break 31590 } 31591 i0 := x0.AuxInt 31592 s := x0.Aux 31593 _ = x0.Args[2] 31594 p := x0.Args[0] 31595 idx := x0.Args[1] 31596 mem := x0.Args[2] 31597 or := v.Args[1] 31598 if or.Op != OpAMD64ORQ { 31599 break 31600 } 31601 _ = or.Args[1] 31602 y := or.Args[0] 31603 s1 := or.Args[1] 31604 if s1.Op != OpAMD64SHLQconst { 31605 break 31606 } 31607 j1 := s1.AuxInt 31608 r1 := s1.Args[0] 31609 if r1.Op != OpAMD64ROLWconst { 31610 break 31611 } 31612 if r1.AuxInt != 8 { 31613 break 31614 } 31615 x1 := r1.Args[0] 31616 if x1.Op != OpAMD64MOVWloadidx1 { 31617 break 31618 } 31619 i1 := x1.AuxInt 31620 if x1.Aux != s { 31621 
break 31622 } 31623 _ = x1.Args[2] 31624 if idx != x1.Args[0] { 31625 break 31626 } 31627 if p != x1.Args[1] { 31628 break 31629 } 31630 if mem != x1.Args[2] { 31631 break 31632 } 31633 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31634 break 31635 } 31636 b = mergePoint(b, x0, x1) 31637 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31638 v.reset(OpCopy) 31639 v.AddArg(v0) 31640 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31641 v1.AuxInt = j1 31642 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31643 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31644 v3.AuxInt = i0 31645 v3.Aux = s 31646 v3.AddArg(p) 31647 v3.AddArg(idx) 31648 v3.AddArg(mem) 31649 v2.AddArg(v3) 31650 v1.AddArg(v2) 31651 v0.AddArg(v1) 31652 v0.AddArg(y) 31653 return true 31654 } 31655 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 31656 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31657 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31658 for { 31659 _ = v.Args[1] 31660 s0 := v.Args[0] 31661 if s0.Op != OpAMD64SHLQconst { 31662 break 31663 } 31664 j0 := s0.AuxInt 31665 r0 := s0.Args[0] 31666 if r0.Op != OpAMD64ROLWconst { 31667 break 31668 } 31669 if r0.AuxInt != 8 { 31670 break 31671 } 31672 x0 := r0.Args[0] 31673 if x0.Op != OpAMD64MOVWloadidx1 { 31674 break 31675 } 31676 i0 := x0.AuxInt 31677 s := x0.Aux 31678 _ = x0.Args[2] 31679 idx := x0.Args[0] 31680 p := x0.Args[1] 31681 mem := x0.Args[2] 31682 or := v.Args[1] 31683 if or.Op != OpAMD64ORQ { 31684 break 31685 } 31686 _ = or.Args[1] 31687 y := or.Args[0] 31688 s1 := or.Args[1] 31689 if s1.Op != OpAMD64SHLQconst { 31690 break 31691 } 31692 j1 := s1.AuxInt 31693 r1 := s1.Args[0] 31694 if r1.Op != OpAMD64ROLWconst { 31695 break 31696 } 31697 if r1.AuxInt != 8 { 31698 break 31699 } 31700 x1 := r1.Args[0] 31701 if x1.Op != OpAMD64MOVWloadidx1 { 31702 break 31703 } 31704 i1 := x1.AuxInt 31705 if x1.Aux != s { 31706 break 31707 } 31708 _ = x1.Args[2] 31709 if idx != x1.Args[0] { 31710 break 31711 } 31712 if p != x1.Args[1] { 31713 break 31714 } 31715 if mem != x1.Args[2] { 31716 break 31717 } 31718 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31719 break 31720 } 31721 b = mergePoint(b, x0, x1) 31722 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31723 v.reset(OpCopy) 31724 v.AddArg(v0) 31725 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31726 v1.AuxInt = j1 31727 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31728 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31729 v3.AuxInt = i0 31730 v3.Aux = s 31731 v3.AddArg(p) 31732 v3.AddArg(idx) 31733 v3.AddArg(mem) 31734 v2.AddArg(v3) 
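// Note: arguments are attached leaf-first (load into BSWAPL, BSWAPL
// into SHLQconst, SHLQconst into ORQ); v.reset(OpCopy) above has
// already turned the original value into a copy of v0, which lives in
// the mergePoint block rather than in v's original block.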
31735 v1.AddArg(v2) 31736 v0.AddArg(v1) 31737 v0.AddArg(y) 31738 return true 31739 } 31740 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31741 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31742 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31743 for { 31744 _ = v.Args[1] 31745 or := v.Args[0] 31746 if or.Op != OpAMD64ORQ { 31747 break 31748 } 31749 _ = or.Args[1] 31750 s1 := or.Args[0] 31751 if s1.Op != OpAMD64SHLQconst { 31752 break 31753 } 31754 j1 := s1.AuxInt 31755 r1 := s1.Args[0] 31756 if r1.Op != OpAMD64ROLWconst { 31757 break 31758 } 31759 if r1.AuxInt != 8 { 31760 break 31761 } 31762 x1 := r1.Args[0] 31763 if x1.Op != OpAMD64MOVWloadidx1 { 31764 break 31765 } 31766 i1 := x1.AuxInt 31767 s := x1.Aux 31768 _ = x1.Args[2] 31769 p := x1.Args[0] 31770 idx := x1.Args[1] 31771 mem := x1.Args[2] 31772 y := or.Args[1] 31773 s0 := v.Args[1] 31774 if s0.Op != OpAMD64SHLQconst { 31775 break 31776 } 31777 j0 := s0.AuxInt 31778 r0 := s0.Args[0] 31779 if r0.Op != OpAMD64ROLWconst { 31780 break 31781 } 31782 if r0.AuxInt != 8 { 31783 break 31784 } 31785 x0 := r0.Args[0] 31786 if x0.Op != OpAMD64MOVWloadidx1 { 31787 break 31788 } 31789 i0 := x0.AuxInt 31790 if x0.Aux != s { 31791 break 31792 } 31793 _ = x0.Args[2] 31794 if p != x0.Args[0] { 31795 break 31796 } 31797 if idx != x0.Args[1] { 31798 break 31799 } 31800 if mem != x0.Args[2] { 31801 break 31802 } 31803 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31804 break 31805 } 31806 b = mergePoint(b, x0, x1) 31807 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31808 v.reset(OpCopy) 31809 v.AddArg(v0) 31810 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31811 v1.AuxInt = j1 31812 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31813 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31814 v3.AuxInt = i0 31815 v3.Aux = s 31816 v3.AddArg(p) 31817 v3.AddArg(idx) 31818 v3.AddArg(mem) 31819 v2.AddArg(v3) 31820 v1.AddArg(v2) 31821 v0.AddArg(v1) 31822 v0.AddArg(y) 31823 return true 31824 } 31825 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31826 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31827 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31828 for { 31829 _ = v.Args[1] 31830 or := v.Args[0] 31831 if or.Op != OpAMD64ORQ { 31832 break 31833 } 31834 _ = or.Args[1] 31835 s1 := or.Args[0] 31836 if s1.Op != OpAMD64SHLQconst { 31837 break 31838 } 31839 j1 := s1.AuxInt 31840 
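// Note: AuxInt carries each op's constant operand: the shift count on a
// SHLQconst, the fixed rotation on a ROLWconst (checked against 8
// below), and the constant byte offset on the indexed loads; Aux (s)
// carries the symbol that offset is relative to.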
r1 := s1.Args[0] 31841 if r1.Op != OpAMD64ROLWconst { 31842 break 31843 } 31844 if r1.AuxInt != 8 { 31845 break 31846 } 31847 x1 := r1.Args[0] 31848 if x1.Op != OpAMD64MOVWloadidx1 { 31849 break 31850 } 31851 i1 := x1.AuxInt 31852 s := x1.Aux 31853 _ = x1.Args[2] 31854 idx := x1.Args[0] 31855 p := x1.Args[1] 31856 mem := x1.Args[2] 31857 y := or.Args[1] 31858 s0 := v.Args[1] 31859 if s0.Op != OpAMD64SHLQconst { 31860 break 31861 } 31862 j0 := s0.AuxInt 31863 r0 := s0.Args[0] 31864 if r0.Op != OpAMD64ROLWconst { 31865 break 31866 } 31867 if r0.AuxInt != 8 { 31868 break 31869 } 31870 x0 := r0.Args[0] 31871 if x0.Op != OpAMD64MOVWloadidx1 { 31872 break 31873 } 31874 i0 := x0.AuxInt 31875 if x0.Aux != s { 31876 break 31877 } 31878 _ = x0.Args[2] 31879 if p != x0.Args[0] { 31880 break 31881 } 31882 if idx != x0.Args[1] { 31883 break 31884 } 31885 if mem != x0.Args[2] { 31886 break 31887 } 31888 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31889 break 31890 } 31891 b = mergePoint(b, x0, x1) 31892 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31893 v.reset(OpCopy) 31894 v.AddArg(v0) 31895 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31896 v1.AuxInt = j1 31897 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31898 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31899 v3.AuxInt = i0 31900 v3.Aux = s 31901 v3.AddArg(p) 31902 v3.AddArg(idx) 31903 v3.AddArg(mem) 31904 v2.AddArg(v3) 31905 v1.AddArg(v2) 31906 v0.AddArg(v1) 31907 v0.AddArg(y) 31908 return true 31909 } 31910 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31911 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31912 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31913 for { 31914 _ = v.Args[1] 31915 or := v.Args[0] 31916 if or.Op != OpAMD64ORQ { 31917 break 31918 } 31919 _ = or.Args[1] 31920 y := or.Args[0] 31921 s1 := or.Args[1] 31922 if s1.Op != OpAMD64SHLQconst { 31923 break 31924 } 31925 j1 := s1.AuxInt 31926 r1 := s1.Args[0] 31927 if r1.Op != OpAMD64ROLWconst { 31928 break 31929 } 31930 if r1.AuxInt != 8 { 31931 break 31932 } 31933 x1 := r1.Args[0] 31934 if x1.Op != OpAMD64MOVWloadidx1 { 31935 break 31936 } 31937 i1 := x1.AuxInt 31938 s := x1.Aux 31939 _ = x1.Args[2] 31940 p := x1.Args[0] 31941 idx := x1.Args[1] 31942 mem := x1.Args[2] 31943 s0 := v.Args[1] 31944 if s0.Op != OpAMD64SHLQconst { 31945 break 31946 } 31947 j0 := s0.AuxInt 31948 r0 := s0.Args[0] 31949 if r0.Op != OpAMD64ROLWconst { 31950 break 31951 } 31952 if r0.AuxInt != 8 { 31953 break 31954 } 31955 x0 := r0.Args[0] 31956 if x0.Op != OpAMD64MOVWloadidx1 { 31957 break 31958 } 31959 i0 := x0.AuxInt 31960 if x0.Aux != s { 31961 break 31962 } 31963 _ = x0.Args[2] 31964 if p != x0.Args[0] { 31965 break 31966 } 31967 if idx != x0.Args[1] { 31968 break 31969 } 31970 if mem != x0.Args[2] { 31971 break 31972 } 31973 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 
0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31974 break 31975 } 31976 b = mergePoint(b, x0, x1) 31977 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31978 v.reset(OpCopy) 31979 v.AddArg(v0) 31980 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31981 v1.AuxInt = j1 31982 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31983 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31984 v3.AuxInt = i0 31985 v3.Aux = s 31986 v3.AddArg(p) 31987 v3.AddArg(idx) 31988 v3.AddArg(mem) 31989 v2.AddArg(v3) 31990 v1.AddArg(v2) 31991 v0.AddArg(v1) 31992 v0.AddArg(y) 31993 return true 31994 } 31995 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31996 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31997 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31998 for { 31999 _ = v.Args[1] 32000 or := v.Args[0] 32001 if or.Op != OpAMD64ORQ { 32002 break 32003 } 32004 _ = or.Args[1] 32005 y := or.Args[0] 32006 s1 := or.Args[1] 32007 if s1.Op != OpAMD64SHLQconst { 32008 break 32009 } 32010 j1 := s1.AuxInt 32011 r1 := s1.Args[0] 32012 if r1.Op != OpAMD64ROLWconst { 32013 break 32014 } 32015 if r1.AuxInt != 8 { 32016 break 32017 } 32018 x1 := r1.Args[0] 32019 if x1.Op != OpAMD64MOVWloadidx1 { 32020 break 32021 } 32022 i1 := x1.AuxInt 32023 s := x1.Aux 32024 _ = x1.Args[2] 32025 idx := x1.Args[0] 32026 p := x1.Args[1] 32027 mem := x1.Args[2] 32028 s0 := v.Args[1] 32029 if s0.Op != OpAMD64SHLQconst { 32030 break 32031 } 32032 j0 := s0.AuxInt 32033 r0 := s0.Args[0] 32034 if r0.Op != OpAMD64ROLWconst { 32035 break 32036 } 32037 if r0.AuxInt != 8 { 32038 break 32039 } 32040 x0 := r0.Args[0] 32041 if x0.Op != OpAMD64MOVWloadidx1 { 32042 break 32043 } 32044 i0 := x0.AuxInt 32045 if x0.Aux != s { 32046 break 32047 } 32048 _ = x0.Args[2] 32049 if p != x0.Args[0] { 32050 break 32051 } 32052 if idx != x0.Args[1] { 32053 break 32054 } 32055 if mem != x0.Args[2] { 32056 break 32057 } 32058 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32059 break 32060 } 32061 b = mergePoint(b, x0, x1) 32062 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32063 v.reset(OpCopy) 32064 v.AddArg(v0) 32065 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32066 v1.AuxInt = j1 32067 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32068 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32069 v3.AuxInt = i0 32070 v3.Aux = s 32071 v3.AddArg(p) 32072 v3.AddArg(idx) 32073 v3.AddArg(mem) 32074 v2.AddArg(v3) 32075 v1.AddArg(v2) 32076 v0.AddArg(v1) 32077 v0.AddArg(y) 32078 return true 32079 } 32080 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] 
x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 32081 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32082 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32083 for { 32084 _ = v.Args[1] 32085 or := v.Args[0] 32086 if or.Op != OpAMD64ORQ { 32087 break 32088 } 32089 _ = or.Args[1] 32090 s1 := or.Args[0] 32091 if s1.Op != OpAMD64SHLQconst { 32092 break 32093 } 32094 j1 := s1.AuxInt 32095 r1 := s1.Args[0] 32096 if r1.Op != OpAMD64ROLWconst { 32097 break 32098 } 32099 if r1.AuxInt != 8 { 32100 break 32101 } 32102 x1 := r1.Args[0] 32103 if x1.Op != OpAMD64MOVWloadidx1 { 32104 break 32105 } 32106 i1 := x1.AuxInt 32107 s := x1.Aux 32108 _ = x1.Args[2] 32109 p := x1.Args[0] 32110 idx := x1.Args[1] 32111 mem := x1.Args[2] 32112 y := or.Args[1] 32113 s0 := v.Args[1] 32114 if s0.Op != OpAMD64SHLQconst { 32115 break 32116 } 32117 j0 := s0.AuxInt 32118 r0 := s0.Args[0] 32119 if r0.Op != OpAMD64ROLWconst { 32120 break 32121 } 32122 if r0.AuxInt != 8 { 32123 break 32124 } 32125 x0 := r0.Args[0] 32126 if x0.Op != OpAMD64MOVWloadidx1 { 32127 break 32128 } 32129 i0 := x0.AuxInt 32130 if x0.Aux != s { 32131 break 32132 } 32133 _ = x0.Args[2] 32134 if idx != x0.Args[0] { 32135 break 32136 } 32137 if p != x0.Args[1] { 32138 break 32139 } 32140 if mem != x0.Args[2] { 32141 break 32142 } 32143 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32144 break 32145 } 32146 b = mergePoint(b, x0, x1) 32147 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32148 v.reset(OpCopy) 32149 v.AddArg(v0) 32150 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32151 v1.AuxInt = j1 32152 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32153 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32154 v3.AuxInt = i0 32155 v3.Aux = s 32156 v3.AddArg(p) 32157 v3.AddArg(idx) 32158 v3.AddArg(mem) 32159 v2.AddArg(v3) 32160 v1.AddArg(v2) 32161 v0.AddArg(v1) 32162 v0.AddArg(y) 32163 return true 32164 } 32165 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 32166 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32167 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32168 for { 32169 _ = v.Args[1] 32170 or := v.Args[0] 32171 if or.Op != OpAMD64ORQ { 32172 break 32173 } 32174 _ = or.Args[1] 32175 s1 := or.Args[0] 32176 if s1.Op != OpAMD64SHLQconst { 32177 break 32178 } 32179 j1 := s1.AuxInt 32180 r1 := s1.Args[0] 32181 if r1.Op != OpAMD64ROLWconst { 32182 break 32183 } 32184 if r1.AuxInt != 8 { 32185 break 32186 } 32187 x1 := r1.Args[0] 32188 if x1.Op != OpAMD64MOVWloadidx1 { 32189 break 32190 } 32191 i1 := x1.AuxInt 32192 
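// Note: the many near-identical cases above and below differ only in
// operand order: ORQ is commutative and the scale-1 indexed loads
// accept (p idx) or (idx p), so the generator emits one case per
// permutation, each re-checking that the second load uses the same p,
// idx, and mem values as the first.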
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
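	// Editorial note: the remaining ORQ rules fold a 64-bit load directly into
	// the OR, turning (ORQ x (MOVQload ...)) into the memory-operand form
	// ORQmem. canMergeLoad checks that it is safe to subsume the load into
	// this op, and clobber marks the now-dead load for removal.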
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
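// Editorial note: the rotate rules below follow one scheme per operand width
// (B/W/L/Q): a rotate left by a negated amount is a rotate right and vice
// versa, and a rotate by a constant becomes a ROLxconst with the amount
// masked to the width (&7, &15, &31, &63). E.g. (ROLL x (NEGQ y)) becomes
// (RORL x y), and (ROLL x (MOVQconst [c])) becomes (ROLLconst [c&31] x).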
func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
	// match: (ROLB x (NEGQ y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// cond:
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
	// match: (ROLL x (NEGQ y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
	// match: (ROLQ x (NEGQ y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
	// match: (ROLW x (NEGQ y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// cond:
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
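// Editorial note: RORx is canonicalized back to ROLx. Rotating right by y is
// the same as rotating left by -y, so (RORB x (NEGQ y)) becomes (ROLB x y),
// and a constant rotate right becomes (ROLBconst [(-c)&7] x), with the count
// again masked to the operand width.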
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
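// Editorial note: for variable counts, SARL/SARQ only observe the low 5
// (resp. 6) bits of the shift amount, so arithmetic on the count that cannot
// change those bits is dropped: adding a constant with c&31 == 0 is a no-op,
// and masking with a constant whose low five bits are all ones (c&31 == 31)
// is a no-op, including underneath a NEGQ/NEGL of the count. Illustration
// (editorial): x >> ((n+64)&31) == x >> (n&31), so
// (SARL x (ADDQconst [64] y)) may be rewritten to (SARL x y).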
func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
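// Editorial note: SARQ repeats the SARL pattern with a 6-bit shift amount:
// constant counts are masked with &63, additions with c&63 == 0 are dropped,
// and masks with c&63 == 63 are dropped, again also underneath NEGQ/NEGL.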
func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARQ x (MOVQconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
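// Editorial note: SARxconst of a known constant folds to a constant. The
// generated code shifts the AuxInt right with d >> uint64(c); since AuxInt is
// a signed int64 this is an arithmetic (sign-extending) shift, matching SAR.
// For the narrow widths this assumes d holds the sign-extended narrow value.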
func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
	// match: (SARQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
	// match: (SARWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
	// match: (SBBLcarrymask (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
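// Editorial note: SBBxcarrymask materializes 0 or -1 from the carry flag.
// When the flag state is statically known (the Flag* pseudo-ops), it folds
// to the constant directly: carry set (the unsigned-less-than states) gives
// -1, carry clear gives 0.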
func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
	// match: (SBBQcarrymask (FlagEQ))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
	// match: (SETA (InvertFlags x))
	// cond:
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
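// Editorial note: the SETcc rules come in two flavors. (SETcc (InvertFlags x))
// flips the condition to its operand-swapped dual (SETA <-> SETB,
// SETAE <-> SETBE, SETL <-> SETG, and so on; SETEQ/SETNE are self-dual), and
// SETcc of a statically known flag state folds to (MOVLconst [0]) or
// (MOVLconst [1]).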
func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
	// match: (SETAE (InvertFlags x))
	// cond:
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
	// match: (SETB (InvertFlags x))
	// cond:
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
	// match: (SETBE (InvertFlags x))
	// cond:
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
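// Editorial note: SETEQ/SETNE of a single-bit TEST become bit-test
// instructions. (TESTQ (SHLQ (MOVQconst [1]) x) y) checks bit x of y, so the
// SETcc becomes SETAE/SETB of (BTQ x y): BT copies the selected bit into CF,
// AE means the bit is clear, B means it is set. A TESTx against a
// power-of-two constant similarly becomes BTxconst [log2(c)]. All of these
// rewrites are gated on !config.nacl.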
func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAE (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
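// Editorial note: SETNE mirrors SETEQ below: the same TEST-to-BT rewrites
// apply but produce SETB (bit set) instead of SETAE, and the known-flag
// cases fold to the complementary constants.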
rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 34866 b := v.Block 34867 _ = b 34868 config := b.Func.Config 34869 _ = config 34870 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 34871 // cond: !config.nacl 34872 // result: (SETB (BTL x y)) 34873 for { 34874 v_0 := v.Args[0] 34875 if v_0.Op != OpAMD64TESTL { 34876 break 34877 } 34878 _ = v_0.Args[1] 34879 v_0_0 := v_0.Args[0] 34880 if v_0_0.Op != OpAMD64SHLL { 34881 break 34882 } 34883 _ = v_0_0.Args[1] 34884 v_0_0_0 := v_0_0.Args[0] 34885 if v_0_0_0.Op != OpAMD64MOVLconst { 34886 break 34887 } 34888 if v_0_0_0.AuxInt != 1 { 34889 break 34890 } 34891 x := v_0_0.Args[1] 34892 y := v_0.Args[1] 34893 if !(!config.nacl) { 34894 break 34895 } 34896 v.reset(OpAMD64SETB) 34897 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 34898 v0.AddArg(x) 34899 v0.AddArg(y) 34900 v.AddArg(v0) 34901 return true 34902 } 34903 // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) 34904 // cond: !config.nacl 34905 // result: (SETB (BTL x y)) 34906 for { 34907 v_0 := v.Args[0] 34908 if v_0.Op != OpAMD64TESTL { 34909 break 34910 } 34911 _ = v_0.Args[1] 34912 y := v_0.Args[0] 34913 v_0_1 := v_0.Args[1] 34914 if v_0_1.Op != OpAMD64SHLL { 34915 break 34916 } 34917 _ = v_0_1.Args[1] 34918 v_0_1_0 := v_0_1.Args[0] 34919 if v_0_1_0.Op != OpAMD64MOVLconst { 34920 break 34921 } 34922 if v_0_1_0.AuxInt != 1 { 34923 break 34924 } 34925 x := v_0_1.Args[1] 34926 if !(!config.nacl) { 34927 break 34928 } 34929 v.reset(OpAMD64SETB) 34930 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 34931 v0.AddArg(x) 34932 v0.AddArg(y) 34933 v.AddArg(v0) 34934 return true 34935 } 34936 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 34937 // cond: !config.nacl 34938 // result: (SETB (BTQ x y)) 34939 for { 34940 v_0 := v.Args[0] 34941 if v_0.Op != OpAMD64TESTQ { 34942 break 34943 } 34944 _ = v_0.Args[1] 34945 v_0_0 := v_0.Args[0] 34946 if v_0_0.Op != OpAMD64SHLQ { 34947 break 34948 } 34949 _ = v_0_0.Args[1] 34950 v_0_0_0 := v_0_0.Args[0] 34951 if v_0_0_0.Op != OpAMD64MOVQconst { 34952 break 34953 } 34954 if v_0_0_0.AuxInt != 1 { 34955 break 34956 } 34957 x := v_0_0.Args[1] 34958 y := v_0.Args[1] 34959 if !(!config.nacl) { 34960 break 34961 } 34962 v.reset(OpAMD64SETB) 34963 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 34964 v0.AddArg(x) 34965 v0.AddArg(y) 34966 v.AddArg(v0) 34967 return true 34968 } 34969 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) 34970 // cond: !config.nacl 34971 // result: (SETB (BTQ x y)) 34972 for { 34973 v_0 := v.Args[0] 34974 if v_0.Op != OpAMD64TESTQ { 34975 break 34976 } 34977 _ = v_0.Args[1] 34978 y := v_0.Args[0] 34979 v_0_1 := v_0.Args[1] 34980 if v_0_1.Op != OpAMD64SHLQ { 34981 break 34982 } 34983 _ = v_0_1.Args[1] 34984 v_0_1_0 := v_0_1.Args[0] 34985 if v_0_1_0.Op != OpAMD64MOVQconst { 34986 break 34987 } 34988 if v_0_1_0.AuxInt != 1 { 34989 break 34990 } 34991 x := v_0_1.Args[1] 34992 if !(!config.nacl) { 34993 break 34994 } 34995 v.reset(OpAMD64SETB) 34996 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 34997 v0.AddArg(x) 34998 v0.AddArg(y) 34999 v.AddArg(v0) 35000 return true 35001 } 35002 // match: (SETNE (TESTLconst [c] x)) 35003 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 35004 // result: (SETB (BTLconst [log2(c)] x)) 35005 for { 35006 v_0 := v.Args[0] 35007 if v_0.Op != OpAMD64TESTLconst { 35008 break 35009 } 35010 c := v_0.AuxInt 35011 x := v_0.Args[0] 35012 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 35013 break 35014 } 35015 v.reset(OpAMD64SETB) 35016 v0 := b.NewValue0(v.Pos, 
OpAMD64BTLconst, types.TypeFlags) 35017 v0.AuxInt = log2(c) 35018 v0.AddArg(x) 35019 v.AddArg(v0) 35020 return true 35021 } 35022 // match: (SETNE (TESTQconst [c] x)) 35023 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 35024 // result: (SETB (BTQconst [log2(c)] x)) 35025 for { 35026 v_0 := v.Args[0] 35027 if v_0.Op != OpAMD64TESTQconst { 35028 break 35029 } 35030 c := v_0.AuxInt 35031 x := v_0.Args[0] 35032 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 35033 break 35034 } 35035 v.reset(OpAMD64SETB) 35036 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 35037 v0.AuxInt = log2(c) 35038 v0.AddArg(x) 35039 v.AddArg(v0) 35040 return true 35041 } 35042 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 35043 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 35044 // result: (SETB (BTQconst [log2(c)] x)) 35045 for { 35046 v_0 := v.Args[0] 35047 if v_0.Op != OpAMD64TESTQ { 35048 break 35049 } 35050 _ = v_0.Args[1] 35051 v_0_0 := v_0.Args[0] 35052 if v_0_0.Op != OpAMD64MOVQconst { 35053 break 35054 } 35055 c := v_0_0.AuxInt 35056 x := v_0.Args[1] 35057 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 35058 break 35059 } 35060 v.reset(OpAMD64SETB) 35061 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 35062 v0.AuxInt = log2(c) 35063 v0.AddArg(x) 35064 v.AddArg(v0) 35065 return true 35066 } 35067 // match: (SETNE (TESTQ x (MOVQconst [c]))) 35068 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 35069 // result: (SETB (BTQconst [log2(c)] x)) 35070 for { 35071 v_0 := v.Args[0] 35072 if v_0.Op != OpAMD64TESTQ { 35073 break 35074 } 35075 _ = v_0.Args[1] 35076 x := v_0.Args[0] 35077 v_0_1 := v_0.Args[1] 35078 if v_0_1.Op != OpAMD64MOVQconst { 35079 break 35080 } 35081 c := v_0_1.AuxInt 35082 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 35083 break 35084 } 35085 v.reset(OpAMD64SETB) 35086 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 35087 v0.AuxInt = log2(c) 35088 v0.AddArg(x) 35089 v.AddArg(v0) 35090 return true 35091 } 35092 // match: (SETNE (InvertFlags x)) 35093 // cond: 35094 // result: (SETNE x) 35095 for { 35096 v_0 := v.Args[0] 35097 if v_0.Op != OpAMD64InvertFlags { 35098 break 35099 } 35100 x := v_0.Args[0] 35101 v.reset(OpAMD64SETNE) 35102 v.AddArg(x) 35103 return true 35104 } 35105 // match: (SETNE (FlagEQ)) 35106 // cond: 35107 // result: (MOVLconst [0]) 35108 for { 35109 v_0 := v.Args[0] 35110 if v_0.Op != OpAMD64FlagEQ { 35111 break 35112 } 35113 v.reset(OpAMD64MOVLconst) 35114 v.AuxInt = 0 35115 return true 35116 } 35117 return false 35118 } 35119 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { 35120 // match: (SETNE (FlagLT_ULT)) 35121 // cond: 35122 // result: (MOVLconst [1]) 35123 for { 35124 v_0 := v.Args[0] 35125 if v_0.Op != OpAMD64FlagLT_ULT { 35126 break 35127 } 35128 v.reset(OpAMD64MOVLconst) 35129 v.AuxInt = 1 35130 return true 35131 } 35132 // match: (SETNE (FlagLT_UGT)) 35133 // cond: 35134 // result: (MOVLconst [1]) 35135 for { 35136 v_0 := v.Args[0] 35137 if v_0.Op != OpAMD64FlagLT_UGT { 35138 break 35139 } 35140 v.reset(OpAMD64MOVLconst) 35141 v.AuxInt = 1 35142 return true 35143 } 35144 // match: (SETNE (FlagGT_ULT)) 35145 // cond: 35146 // result: (MOVLconst [1]) 35147 for { 35148 v_0 := v.Args[0] 35149 if v_0.Op != OpAMD64FlagGT_ULT { 35150 break 35151 } 35152 v.reset(OpAMD64MOVLconst) 35153 v.AuxInt = 1 35154 return true 35155 } 35156 // match: (SETNE (FlagGT_UGT)) 35157 // cond: 35158 // result: (MOVLconst [1]) 35159 for { 35160 v_0 := v.Args[0] 35161 if v_0.Op != OpAMD64FlagGT_UGT { 
35162 break 35163 } 35164 v.reset(OpAMD64MOVLconst) 35165 v.AuxInt = 1 35166 return true 35167 } 35168 return false 35169 } 35170 func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { 35171 b := v.Block 35172 _ = b 35173 // match: (SHLL x (MOVQconst [c])) 35174 // cond: 35175 // result: (SHLLconst [c&31] x) 35176 for { 35177 _ = v.Args[1] 35178 x := v.Args[0] 35179 v_1 := v.Args[1] 35180 if v_1.Op != OpAMD64MOVQconst { 35181 break 35182 } 35183 c := v_1.AuxInt 35184 v.reset(OpAMD64SHLLconst) 35185 v.AuxInt = c & 31 35186 v.AddArg(x) 35187 return true 35188 } 35189 // match: (SHLL x (MOVLconst [c])) 35190 // cond: 35191 // result: (SHLLconst [c&31] x) 35192 for { 35193 _ = v.Args[1] 35194 x := v.Args[0] 35195 v_1 := v.Args[1] 35196 if v_1.Op != OpAMD64MOVLconst { 35197 break 35198 } 35199 c := v_1.AuxInt 35200 v.reset(OpAMD64SHLLconst) 35201 v.AuxInt = c & 31 35202 v.AddArg(x) 35203 return true 35204 } 35205 // match: (SHLL x (ADDQconst [c] y)) 35206 // cond: c & 31 == 0 35207 // result: (SHLL x y) 35208 for { 35209 _ = v.Args[1] 35210 x := v.Args[0] 35211 v_1 := v.Args[1] 35212 if v_1.Op != OpAMD64ADDQconst { 35213 break 35214 } 35215 c := v_1.AuxInt 35216 y := v_1.Args[0] 35217 if !(c&31 == 0) { 35218 break 35219 } 35220 v.reset(OpAMD64SHLL) 35221 v.AddArg(x) 35222 v.AddArg(y) 35223 return true 35224 } 35225 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y))) 35226 // cond: c & 31 == 0 35227 // result: (SHLL x (NEGQ <t> y)) 35228 for { 35229 _ = v.Args[1] 35230 x := v.Args[0] 35231 v_1 := v.Args[1] 35232 if v_1.Op != OpAMD64NEGQ { 35233 break 35234 } 35235 t := v_1.Type 35236 v_1_0 := v_1.Args[0] 35237 if v_1_0.Op != OpAMD64ADDQconst { 35238 break 35239 } 35240 c := v_1_0.AuxInt 35241 y := v_1_0.Args[0] 35242 if !(c&31 == 0) { 35243 break 35244 } 35245 v.reset(OpAMD64SHLL) 35246 v.AddArg(x) 35247 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35248 v0.AddArg(y) 35249 v.AddArg(v0) 35250 return true 35251 } 35252 // match: (SHLL x (ANDQconst [c] y)) 35253 // cond: c & 31 == 31 35254 // result: (SHLL x y) 35255 for { 35256 _ = v.Args[1] 35257 x := v.Args[0] 35258 v_1 := v.Args[1] 35259 if v_1.Op != OpAMD64ANDQconst { 35260 break 35261 } 35262 c := v_1.AuxInt 35263 y := v_1.Args[0] 35264 if !(c&31 == 31) { 35265 break 35266 } 35267 v.reset(OpAMD64SHLL) 35268 v.AddArg(x) 35269 v.AddArg(y) 35270 return true 35271 } 35272 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y))) 35273 // cond: c & 31 == 31 35274 // result: (SHLL x (NEGQ <t> y)) 35275 for { 35276 _ = v.Args[1] 35277 x := v.Args[0] 35278 v_1 := v.Args[1] 35279 if v_1.Op != OpAMD64NEGQ { 35280 break 35281 } 35282 t := v_1.Type 35283 v_1_0 := v_1.Args[0] 35284 if v_1_0.Op != OpAMD64ANDQconst { 35285 break 35286 } 35287 c := v_1_0.AuxInt 35288 y := v_1_0.Args[0] 35289 if !(c&31 == 31) { 35290 break 35291 } 35292 v.reset(OpAMD64SHLL) 35293 v.AddArg(x) 35294 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35295 v0.AddArg(y) 35296 v.AddArg(v0) 35297 return true 35298 } 35299 // match: (SHLL x (ADDLconst [c] y)) 35300 // cond: c & 31 == 0 35301 // result: (SHLL x y) 35302 for { 35303 _ = v.Args[1] 35304 x := v.Args[0] 35305 v_1 := v.Args[1] 35306 if v_1.Op != OpAMD64ADDLconst { 35307 break 35308 } 35309 c := v_1.AuxInt 35310 y := v_1.Args[0] 35311 if !(c&31 == 0) { 35312 break 35313 } 35314 v.reset(OpAMD64SHLL) 35315 v.AddArg(x) 35316 v.AddArg(y) 35317 return true 35318 } 35319 // match: (SHLL x (NEGL <t> (ADDLconst [c] y))) 35320 // cond: c & 31 == 0 35321 // result: (SHLL x (NEGL <t> y)) 35322 for { 35323 _ = v.Args[1] 35324 x := v.Args[0] 35325 v_1 := v.Args[1] 
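// The SHLL count rules in this function rely on x86 masking 32-bit shift
// counts to their low 5 bits, so only the count mod 32 matters. An
// ADDconst of a multiple of 32 can therefore be dropped, and an ANDconst
// whose mask already covers the low 5 bits is redundant; the NEG-wrapped
// forms hold because negation also preserves values mod 32. Sketch of
// the arithmetic (illustrative only):
//
//	// (y + c) & 31 == y & 31        when c&31 == 0
//	// (y & c) & 31 == y & 31        when c&31 == 31
//	// (-(y + c)) & 31 == (-y) & 31  when c&31 == 0
//
// The SHLQ/SHRL/SHRQ rules below are identical up to the operand width,
// with a 6-bit mask (c&63) for the 64-bit forms.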
35326 if v_1.Op != OpAMD64NEGL { 35327 break 35328 } 35329 t := v_1.Type 35330 v_1_0 := v_1.Args[0] 35331 if v_1_0.Op != OpAMD64ADDLconst { 35332 break 35333 } 35334 c := v_1_0.AuxInt 35335 y := v_1_0.Args[0] 35336 if !(c&31 == 0) { 35337 break 35338 } 35339 v.reset(OpAMD64SHLL) 35340 v.AddArg(x) 35341 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35342 v0.AddArg(y) 35343 v.AddArg(v0) 35344 return true 35345 } 35346 // match: (SHLL x (ANDLconst [c] y)) 35347 // cond: c & 31 == 31 35348 // result: (SHLL x y) 35349 for { 35350 _ = v.Args[1] 35351 x := v.Args[0] 35352 v_1 := v.Args[1] 35353 if v_1.Op != OpAMD64ANDLconst { 35354 break 35355 } 35356 c := v_1.AuxInt 35357 y := v_1.Args[0] 35358 if !(c&31 == 31) { 35359 break 35360 } 35361 v.reset(OpAMD64SHLL) 35362 v.AddArg(x) 35363 v.AddArg(y) 35364 return true 35365 } 35366 // match: (SHLL x (NEGL <t> (ANDLconst [c] y))) 35367 // cond: c & 31 == 31 35368 // result: (SHLL x (NEGL <t> y)) 35369 for { 35370 _ = v.Args[1] 35371 x := v.Args[0] 35372 v_1 := v.Args[1] 35373 if v_1.Op != OpAMD64NEGL { 35374 break 35375 } 35376 t := v_1.Type 35377 v_1_0 := v_1.Args[0] 35378 if v_1_0.Op != OpAMD64ANDLconst { 35379 break 35380 } 35381 c := v_1_0.AuxInt 35382 y := v_1_0.Args[0] 35383 if !(c&31 == 31) { 35384 break 35385 } 35386 v.reset(OpAMD64SHLL) 35387 v.AddArg(x) 35388 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35389 v0.AddArg(y) 35390 v.AddArg(v0) 35391 return true 35392 } 35393 return false 35394 } 35395 func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { 35396 // match: (SHLLconst x [0]) 35397 // cond: 35398 // result: x 35399 for { 35400 if v.AuxInt != 0 { 35401 break 35402 } 35403 x := v.Args[0] 35404 v.reset(OpCopy) 35405 v.Type = x.Type 35406 v.AddArg(x) 35407 return true 35408 } 35409 return false 35410 } 35411 func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { 35412 b := v.Block 35413 _ = b 35414 // match: (SHLQ x (MOVQconst [c])) 35415 // cond: 35416 // result: (SHLQconst [c&63] x) 35417 for { 35418 _ = v.Args[1] 35419 x := v.Args[0] 35420 v_1 := v.Args[1] 35421 if v_1.Op != OpAMD64MOVQconst { 35422 break 35423 } 35424 c := v_1.AuxInt 35425 v.reset(OpAMD64SHLQconst) 35426 v.AuxInt = c & 63 35427 v.AddArg(x) 35428 return true 35429 } 35430 // match: (SHLQ x (MOVLconst [c])) 35431 // cond: 35432 // result: (SHLQconst [c&63] x) 35433 for { 35434 _ = v.Args[1] 35435 x := v.Args[0] 35436 v_1 := v.Args[1] 35437 if v_1.Op != OpAMD64MOVLconst { 35438 break 35439 } 35440 c := v_1.AuxInt 35441 v.reset(OpAMD64SHLQconst) 35442 v.AuxInt = c & 63 35443 v.AddArg(x) 35444 return true 35445 } 35446 // match: (SHLQ x (ADDQconst [c] y)) 35447 // cond: c & 63 == 0 35448 // result: (SHLQ x y) 35449 for { 35450 _ = v.Args[1] 35451 x := v.Args[0] 35452 v_1 := v.Args[1] 35453 if v_1.Op != OpAMD64ADDQconst { 35454 break 35455 } 35456 c := v_1.AuxInt 35457 y := v_1.Args[0] 35458 if !(c&63 == 0) { 35459 break 35460 } 35461 v.reset(OpAMD64SHLQ) 35462 v.AddArg(x) 35463 v.AddArg(y) 35464 return true 35465 } 35466 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y))) 35467 // cond: c & 63 == 0 35468 // result: (SHLQ x (NEGQ <t> y)) 35469 for { 35470 _ = v.Args[1] 35471 x := v.Args[0] 35472 v_1 := v.Args[1] 35473 if v_1.Op != OpAMD64NEGQ { 35474 break 35475 } 35476 t := v_1.Type 35477 v_1_0 := v_1.Args[0] 35478 if v_1_0.Op != OpAMD64ADDQconst { 35479 break 35480 } 35481 c := v_1_0.AuxInt 35482 y := v_1_0.Args[0] 35483 if !(c&63 == 0) { 35484 break 35485 } 35486 v.reset(OpAMD64SHLQ) 35487 v.AddArg(x) 35488 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35489 v0.AddArg(y) 35490 
v.AddArg(v0) 35491 return true 35492 } 35493 // match: (SHLQ x (ANDQconst [c] y)) 35494 // cond: c & 63 == 63 35495 // result: (SHLQ x y) 35496 for { 35497 _ = v.Args[1] 35498 x := v.Args[0] 35499 v_1 := v.Args[1] 35500 if v_1.Op != OpAMD64ANDQconst { 35501 break 35502 } 35503 c := v_1.AuxInt 35504 y := v_1.Args[0] 35505 if !(c&63 == 63) { 35506 break 35507 } 35508 v.reset(OpAMD64SHLQ) 35509 v.AddArg(x) 35510 v.AddArg(y) 35511 return true 35512 } 35513 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y))) 35514 // cond: c & 63 == 63 35515 // result: (SHLQ x (NEGQ <t> y)) 35516 for { 35517 _ = v.Args[1] 35518 x := v.Args[0] 35519 v_1 := v.Args[1] 35520 if v_1.Op != OpAMD64NEGQ { 35521 break 35522 } 35523 t := v_1.Type 35524 v_1_0 := v_1.Args[0] 35525 if v_1_0.Op != OpAMD64ANDQconst { 35526 break 35527 } 35528 c := v_1_0.AuxInt 35529 y := v_1_0.Args[0] 35530 if !(c&63 == 63) { 35531 break 35532 } 35533 v.reset(OpAMD64SHLQ) 35534 v.AddArg(x) 35535 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35536 v0.AddArg(y) 35537 v.AddArg(v0) 35538 return true 35539 } 35540 // match: (SHLQ x (ADDLconst [c] y)) 35541 // cond: c & 63 == 0 35542 // result: (SHLQ x y) 35543 for { 35544 _ = v.Args[1] 35545 x := v.Args[0] 35546 v_1 := v.Args[1] 35547 if v_1.Op != OpAMD64ADDLconst { 35548 break 35549 } 35550 c := v_1.AuxInt 35551 y := v_1.Args[0] 35552 if !(c&63 == 0) { 35553 break 35554 } 35555 v.reset(OpAMD64SHLQ) 35556 v.AddArg(x) 35557 v.AddArg(y) 35558 return true 35559 } 35560 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y))) 35561 // cond: c & 63 == 0 35562 // result: (SHLQ x (NEGL <t> y)) 35563 for { 35564 _ = v.Args[1] 35565 x := v.Args[0] 35566 v_1 := v.Args[1] 35567 if v_1.Op != OpAMD64NEGL { 35568 break 35569 } 35570 t := v_1.Type 35571 v_1_0 := v_1.Args[0] 35572 if v_1_0.Op != OpAMD64ADDLconst { 35573 break 35574 } 35575 c := v_1_0.AuxInt 35576 y := v_1_0.Args[0] 35577 if !(c&63 == 0) { 35578 break 35579 } 35580 v.reset(OpAMD64SHLQ) 35581 v.AddArg(x) 35582 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35583 v0.AddArg(y) 35584 v.AddArg(v0) 35585 return true 35586 } 35587 // match: (SHLQ x (ANDLconst [c] y)) 35588 // cond: c & 63 == 63 35589 // result: (SHLQ x y) 35590 for { 35591 _ = v.Args[1] 35592 x := v.Args[0] 35593 v_1 := v.Args[1] 35594 if v_1.Op != OpAMD64ANDLconst { 35595 break 35596 } 35597 c := v_1.AuxInt 35598 y := v_1.Args[0] 35599 if !(c&63 == 63) { 35600 break 35601 } 35602 v.reset(OpAMD64SHLQ) 35603 v.AddArg(x) 35604 v.AddArg(y) 35605 return true 35606 } 35607 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y))) 35608 // cond: c & 63 == 63 35609 // result: (SHLQ x (NEGL <t> y)) 35610 for { 35611 _ = v.Args[1] 35612 x := v.Args[0] 35613 v_1 := v.Args[1] 35614 if v_1.Op != OpAMD64NEGL { 35615 break 35616 } 35617 t := v_1.Type 35618 v_1_0 := v_1.Args[0] 35619 if v_1_0.Op != OpAMD64ANDLconst { 35620 break 35621 } 35622 c := v_1_0.AuxInt 35623 y := v_1_0.Args[0] 35624 if !(c&63 == 63) { 35625 break 35626 } 35627 v.reset(OpAMD64SHLQ) 35628 v.AddArg(x) 35629 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35630 v0.AddArg(y) 35631 v.AddArg(v0) 35632 return true 35633 } 35634 return false 35635 } 35636 func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { 35637 // match: (SHLQconst x [0]) 35638 // cond: 35639 // result: x 35640 for { 35641 if v.AuxInt != 0 { 35642 break 35643 } 35644 x := v.Args[0] 35645 v.reset(OpCopy) 35646 v.Type = x.Type 35647 v.AddArg(x) 35648 return true 35649 } 35650 return false 35651 } 35652 func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { 35653 // match: (SHRB x (MOVQconst [c])) 35654 // 
cond: c&31 < 8 35655 // result: (SHRBconst [c&31] x) 35656 for { 35657 _ = v.Args[1] 35658 x := v.Args[0] 35659 v_1 := v.Args[1] 35660 if v_1.Op != OpAMD64MOVQconst { 35661 break 35662 } 35663 c := v_1.AuxInt 35664 if !(c&31 < 8) { 35665 break 35666 } 35667 v.reset(OpAMD64SHRBconst) 35668 v.AuxInt = c & 31 35669 v.AddArg(x) 35670 return true 35671 } 35672 // match: (SHRB x (MOVLconst [c])) 35673 // cond: c&31 < 8 35674 // result: (SHRBconst [c&31] x) 35675 for { 35676 _ = v.Args[1] 35677 x := v.Args[0] 35678 v_1 := v.Args[1] 35679 if v_1.Op != OpAMD64MOVLconst { 35680 break 35681 } 35682 c := v_1.AuxInt 35683 if !(c&31 < 8) { 35684 break 35685 } 35686 v.reset(OpAMD64SHRBconst) 35687 v.AuxInt = c & 31 35688 v.AddArg(x) 35689 return true 35690 } 35691 // match: (SHRB _ (MOVQconst [c])) 35692 // cond: c&31 >= 8 35693 // result: (MOVLconst [0]) 35694 for { 35695 _ = v.Args[1] 35696 v_1 := v.Args[1] 35697 if v_1.Op != OpAMD64MOVQconst { 35698 break 35699 } 35700 c := v_1.AuxInt 35701 if !(c&31 >= 8) { 35702 break 35703 } 35704 v.reset(OpAMD64MOVLconst) 35705 v.AuxInt = 0 35706 return true 35707 } 35708 // match: (SHRB _ (MOVLconst [c])) 35709 // cond: c&31 >= 8 35710 // result: (MOVLconst [0]) 35711 for { 35712 _ = v.Args[1] 35713 v_1 := v.Args[1] 35714 if v_1.Op != OpAMD64MOVLconst { 35715 break 35716 } 35717 c := v_1.AuxInt 35718 if !(c&31 >= 8) { 35719 break 35720 } 35721 v.reset(OpAMD64MOVLconst) 35722 v.AuxInt = 0 35723 return true 35724 } 35725 return false 35726 } 35727 func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { 35728 // match: (SHRBconst x [0]) 35729 // cond: 35730 // result: x 35731 for { 35732 if v.AuxInt != 0 { 35733 break 35734 } 35735 x := v.Args[0] 35736 v.reset(OpCopy) 35737 v.Type = x.Type 35738 v.AddArg(x) 35739 return true 35740 } 35741 return false 35742 } 35743 func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { 35744 b := v.Block 35745 _ = b 35746 // match: (SHRL x (MOVQconst [c])) 35747 // cond: 35748 // result: (SHRLconst [c&31] x) 35749 for { 35750 _ = v.Args[1] 35751 x := v.Args[0] 35752 v_1 := v.Args[1] 35753 if v_1.Op != OpAMD64MOVQconst { 35754 break 35755 } 35756 c := v_1.AuxInt 35757 v.reset(OpAMD64SHRLconst) 35758 v.AuxInt = c & 31 35759 v.AddArg(x) 35760 return true 35761 } 35762 // match: (SHRL x (MOVLconst [c])) 35763 // cond: 35764 // result: (SHRLconst [c&31] x) 35765 for { 35766 _ = v.Args[1] 35767 x := v.Args[0] 35768 v_1 := v.Args[1] 35769 if v_1.Op != OpAMD64MOVLconst { 35770 break 35771 } 35772 c := v_1.AuxInt 35773 v.reset(OpAMD64SHRLconst) 35774 v.AuxInt = c & 31 35775 v.AddArg(x) 35776 return true 35777 } 35778 // match: (SHRL x (ADDQconst [c] y)) 35779 // cond: c & 31 == 0 35780 // result: (SHRL x y) 35781 for { 35782 _ = v.Args[1] 35783 x := v.Args[0] 35784 v_1 := v.Args[1] 35785 if v_1.Op != OpAMD64ADDQconst { 35786 break 35787 } 35788 c := v_1.AuxInt 35789 y := v_1.Args[0] 35790 if !(c&31 == 0) { 35791 break 35792 } 35793 v.reset(OpAMD64SHRL) 35794 v.AddArg(x) 35795 v.AddArg(y) 35796 return true 35797 } 35798 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y))) 35799 // cond: c & 31 == 0 35800 // result: (SHRL x (NEGQ <t> y)) 35801 for { 35802 _ = v.Args[1] 35803 x := v.Args[0] 35804 v_1 := v.Args[1] 35805 if v_1.Op != OpAMD64NEGQ { 35806 break 35807 } 35808 t := v_1.Type 35809 v_1_0 := v_1.Args[0] 35810 if v_1_0.Op != OpAMD64ADDQconst { 35811 break 35812 } 35813 c := v_1_0.AuxInt 35814 y := v_1_0.Args[0] 35815 if !(c&31 == 0) { 35816 break 35817 } 35818 v.reset(OpAMD64SHRL) 35819 v.AddArg(x) 35820 v0 := b.NewValue0(v.Pos, 
OpAMD64NEGQ, t) 35821 v0.AddArg(y) 35822 v.AddArg(v0) 35823 return true 35824 } 35825 // match: (SHRL x (ANDQconst [c] y)) 35826 // cond: c & 31 == 31 35827 // result: (SHRL x y) 35828 for { 35829 _ = v.Args[1] 35830 x := v.Args[0] 35831 v_1 := v.Args[1] 35832 if v_1.Op != OpAMD64ANDQconst { 35833 break 35834 } 35835 c := v_1.AuxInt 35836 y := v_1.Args[0] 35837 if !(c&31 == 31) { 35838 break 35839 } 35840 v.reset(OpAMD64SHRL) 35841 v.AddArg(x) 35842 v.AddArg(y) 35843 return true 35844 } 35845 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y))) 35846 // cond: c & 31 == 31 35847 // result: (SHRL x (NEGQ <t> y)) 35848 for { 35849 _ = v.Args[1] 35850 x := v.Args[0] 35851 v_1 := v.Args[1] 35852 if v_1.Op != OpAMD64NEGQ { 35853 break 35854 } 35855 t := v_1.Type 35856 v_1_0 := v_1.Args[0] 35857 if v_1_0.Op != OpAMD64ANDQconst { 35858 break 35859 } 35860 c := v_1_0.AuxInt 35861 y := v_1_0.Args[0] 35862 if !(c&31 == 31) { 35863 break 35864 } 35865 v.reset(OpAMD64SHRL) 35866 v.AddArg(x) 35867 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35868 v0.AddArg(y) 35869 v.AddArg(v0) 35870 return true 35871 } 35872 // match: (SHRL x (ADDLconst [c] y)) 35873 // cond: c & 31 == 0 35874 // result: (SHRL x y) 35875 for { 35876 _ = v.Args[1] 35877 x := v.Args[0] 35878 v_1 := v.Args[1] 35879 if v_1.Op != OpAMD64ADDLconst { 35880 break 35881 } 35882 c := v_1.AuxInt 35883 y := v_1.Args[0] 35884 if !(c&31 == 0) { 35885 break 35886 } 35887 v.reset(OpAMD64SHRL) 35888 v.AddArg(x) 35889 v.AddArg(y) 35890 return true 35891 } 35892 // match: (SHRL x (NEGL <t> (ADDLconst [c] y))) 35893 // cond: c & 31 == 0 35894 // result: (SHRL x (NEGL <t> y)) 35895 for { 35896 _ = v.Args[1] 35897 x := v.Args[0] 35898 v_1 := v.Args[1] 35899 if v_1.Op != OpAMD64NEGL { 35900 break 35901 } 35902 t := v_1.Type 35903 v_1_0 := v_1.Args[0] 35904 if v_1_0.Op != OpAMD64ADDLconst { 35905 break 35906 } 35907 c := v_1_0.AuxInt 35908 y := v_1_0.Args[0] 35909 if !(c&31 == 0) { 35910 break 35911 } 35912 v.reset(OpAMD64SHRL) 35913 v.AddArg(x) 35914 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35915 v0.AddArg(y) 35916 v.AddArg(v0) 35917 return true 35918 } 35919 // match: (SHRL x (ANDLconst [c] y)) 35920 // cond: c & 31 == 31 35921 // result: (SHRL x y) 35922 for { 35923 _ = v.Args[1] 35924 x := v.Args[0] 35925 v_1 := v.Args[1] 35926 if v_1.Op != OpAMD64ANDLconst { 35927 break 35928 } 35929 c := v_1.AuxInt 35930 y := v_1.Args[0] 35931 if !(c&31 == 31) { 35932 break 35933 } 35934 v.reset(OpAMD64SHRL) 35935 v.AddArg(x) 35936 v.AddArg(y) 35937 return true 35938 } 35939 // match: (SHRL x (NEGL <t> (ANDLconst [c] y))) 35940 // cond: c & 31 == 31 35941 // result: (SHRL x (NEGL <t> y)) 35942 for { 35943 _ = v.Args[1] 35944 x := v.Args[0] 35945 v_1 := v.Args[1] 35946 if v_1.Op != OpAMD64NEGL { 35947 break 35948 } 35949 t := v_1.Type 35950 v_1_0 := v_1.Args[0] 35951 if v_1_0.Op != OpAMD64ANDLconst { 35952 break 35953 } 35954 c := v_1_0.AuxInt 35955 y := v_1_0.Args[0] 35956 if !(c&31 == 31) { 35957 break 35958 } 35959 v.reset(OpAMD64SHRL) 35960 v.AddArg(x) 35961 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35962 v0.AddArg(y) 35963 v.AddArg(v0) 35964 return true 35965 } 35966 return false 35967 } 35968 func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { 35969 // match: (SHRLconst x [0]) 35970 // cond: 35971 // result: x 35972 for { 35973 if v.AuxInt != 0 { 35974 break 35975 } 35976 x := v.Args[0] 35977 v.reset(OpCopy) 35978 v.Type = x.Type 35979 v.AddArg(x) 35980 return true 35981 } 35982 return false 35983 } 35984 func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { 35985 b 
:= v.Block 35986 _ = b 35987 // match: (SHRQ x (MOVQconst [c])) 35988 // cond: 35989 // result: (SHRQconst [c&63] x) 35990 for { 35991 _ = v.Args[1] 35992 x := v.Args[0] 35993 v_1 := v.Args[1] 35994 if v_1.Op != OpAMD64MOVQconst { 35995 break 35996 } 35997 c := v_1.AuxInt 35998 v.reset(OpAMD64SHRQconst) 35999 v.AuxInt = c & 63 36000 v.AddArg(x) 36001 return true 36002 } 36003 // match: (SHRQ x (MOVLconst [c])) 36004 // cond: 36005 // result: (SHRQconst [c&63] x) 36006 for { 36007 _ = v.Args[1] 36008 x := v.Args[0] 36009 v_1 := v.Args[1] 36010 if v_1.Op != OpAMD64MOVLconst { 36011 break 36012 } 36013 c := v_1.AuxInt 36014 v.reset(OpAMD64SHRQconst) 36015 v.AuxInt = c & 63 36016 v.AddArg(x) 36017 return true 36018 } 36019 // match: (SHRQ x (ADDQconst [c] y)) 36020 // cond: c & 63 == 0 36021 // result: (SHRQ x y) 36022 for { 36023 _ = v.Args[1] 36024 x := v.Args[0] 36025 v_1 := v.Args[1] 36026 if v_1.Op != OpAMD64ADDQconst { 36027 break 36028 } 36029 c := v_1.AuxInt 36030 y := v_1.Args[0] 36031 if !(c&63 == 0) { 36032 break 36033 } 36034 v.reset(OpAMD64SHRQ) 36035 v.AddArg(x) 36036 v.AddArg(y) 36037 return true 36038 } 36039 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y))) 36040 // cond: c & 63 == 0 36041 // result: (SHRQ x (NEGQ <t> y)) 36042 for { 36043 _ = v.Args[1] 36044 x := v.Args[0] 36045 v_1 := v.Args[1] 36046 if v_1.Op != OpAMD64NEGQ { 36047 break 36048 } 36049 t := v_1.Type 36050 v_1_0 := v_1.Args[0] 36051 if v_1_0.Op != OpAMD64ADDQconst { 36052 break 36053 } 36054 c := v_1_0.AuxInt 36055 y := v_1_0.Args[0] 36056 if !(c&63 == 0) { 36057 break 36058 } 36059 v.reset(OpAMD64SHRQ) 36060 v.AddArg(x) 36061 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 36062 v0.AddArg(y) 36063 v.AddArg(v0) 36064 return true 36065 } 36066 // match: (SHRQ x (ANDQconst [c] y)) 36067 // cond: c & 63 == 63 36068 // result: (SHRQ x y) 36069 for { 36070 _ = v.Args[1] 36071 x := v.Args[0] 36072 v_1 := v.Args[1] 36073 if v_1.Op != OpAMD64ANDQconst { 36074 break 36075 } 36076 c := v_1.AuxInt 36077 y := v_1.Args[0] 36078 if !(c&63 == 63) { 36079 break 36080 } 36081 v.reset(OpAMD64SHRQ) 36082 v.AddArg(x) 36083 v.AddArg(y) 36084 return true 36085 } 36086 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y))) 36087 // cond: c & 63 == 63 36088 // result: (SHRQ x (NEGQ <t> y)) 36089 for { 36090 _ = v.Args[1] 36091 x := v.Args[0] 36092 v_1 := v.Args[1] 36093 if v_1.Op != OpAMD64NEGQ { 36094 break 36095 } 36096 t := v_1.Type 36097 v_1_0 := v_1.Args[0] 36098 if v_1_0.Op != OpAMD64ANDQconst { 36099 break 36100 } 36101 c := v_1_0.AuxInt 36102 y := v_1_0.Args[0] 36103 if !(c&63 == 63) { 36104 break 36105 } 36106 v.reset(OpAMD64SHRQ) 36107 v.AddArg(x) 36108 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 36109 v0.AddArg(y) 36110 v.AddArg(v0) 36111 return true 36112 } 36113 // match: (SHRQ x (ADDLconst [c] y)) 36114 // cond: c & 63 == 0 36115 // result: (SHRQ x y) 36116 for { 36117 _ = v.Args[1] 36118 x := v.Args[0] 36119 v_1 := v.Args[1] 36120 if v_1.Op != OpAMD64ADDLconst { 36121 break 36122 } 36123 c := v_1.AuxInt 36124 y := v_1.Args[0] 36125 if !(c&63 == 0) { 36126 break 36127 } 36128 v.reset(OpAMD64SHRQ) 36129 v.AddArg(x) 36130 v.AddArg(y) 36131 return true 36132 } 36133 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y))) 36134 // cond: c & 63 == 0 36135 // result: (SHRQ x (NEGL <t> y)) 36136 for { 36137 _ = v.Args[1] 36138 x := v.Args[0] 36139 v_1 := v.Args[1] 36140 if v_1.Op != OpAMD64NEGL { 36141 break 36142 } 36143 t := v_1.Type 36144 v_1_0 := v_1.Args[0] 36145 if v_1_0.Op != OpAMD64ADDLconst { 36146 break 36147 } 36148 c := v_1_0.AuxInt 36149 y 
:= v_1_0.Args[0] 36150 if !(c&63 == 0) { 36151 break 36152 } 36153 v.reset(OpAMD64SHRQ) 36154 v.AddArg(x) 36155 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 36156 v0.AddArg(y) 36157 v.AddArg(v0) 36158 return true 36159 } 36160 // match: (SHRQ x (ANDLconst [c] y)) 36161 // cond: c & 63 == 63 36162 // result: (SHRQ x y) 36163 for { 36164 _ = v.Args[1] 36165 x := v.Args[0] 36166 v_1 := v.Args[1] 36167 if v_1.Op != OpAMD64ANDLconst { 36168 break 36169 } 36170 c := v_1.AuxInt 36171 y := v_1.Args[0] 36172 if !(c&63 == 63) { 36173 break 36174 } 36175 v.reset(OpAMD64SHRQ) 36176 v.AddArg(x) 36177 v.AddArg(y) 36178 return true 36179 } 36180 // match: (SHRQ x (NEGL <t> (ANDLconst [c] y))) 36181 // cond: c & 63 == 63 36182 // result: (SHRQ x (NEGL <t> y)) 36183 for { 36184 _ = v.Args[1] 36185 x := v.Args[0] 36186 v_1 := v.Args[1] 36187 if v_1.Op != OpAMD64NEGL { 36188 break 36189 } 36190 t := v_1.Type 36191 v_1_0 := v_1.Args[0] 36192 if v_1_0.Op != OpAMD64ANDLconst { 36193 break 36194 } 36195 c := v_1_0.AuxInt 36196 y := v_1_0.Args[0] 36197 if !(c&63 == 63) { 36198 break 36199 } 36200 v.reset(OpAMD64SHRQ) 36201 v.AddArg(x) 36202 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 36203 v0.AddArg(y) 36204 v.AddArg(v0) 36205 return true 36206 } 36207 return false 36208 } 36209 func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { 36210 // match: (SHRQconst x [0]) 36211 // cond: 36212 // result: x 36213 for { 36214 if v.AuxInt != 0 { 36215 break 36216 } 36217 x := v.Args[0] 36218 v.reset(OpCopy) 36219 v.Type = x.Type 36220 v.AddArg(x) 36221 return true 36222 } 36223 return false 36224 } 36225 func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { 36226 // match: (SHRW x (MOVQconst [c])) 36227 // cond: c&31 < 16 36228 // result: (SHRWconst [c&31] x) 36229 for { 36230 _ = v.Args[1] 36231 x := v.Args[0] 36232 v_1 := v.Args[1] 36233 if v_1.Op != OpAMD64MOVQconst { 36234 break 36235 } 36236 c := v_1.AuxInt 36237 if !(c&31 < 16) { 36238 break 36239 } 36240 v.reset(OpAMD64SHRWconst) 36241 v.AuxInt = c & 31 36242 v.AddArg(x) 36243 return true 36244 } 36245 // match: (SHRW x (MOVLconst [c])) 36246 // cond: c&31 < 16 36247 // result: (SHRWconst [c&31] x) 36248 for { 36249 _ = v.Args[1] 36250 x := v.Args[0] 36251 v_1 := v.Args[1] 36252 if v_1.Op != OpAMD64MOVLconst { 36253 break 36254 } 36255 c := v_1.AuxInt 36256 if !(c&31 < 16) { 36257 break 36258 } 36259 v.reset(OpAMD64SHRWconst) 36260 v.AuxInt = c & 31 36261 v.AddArg(x) 36262 return true 36263 } 36264 // match: (SHRW _ (MOVQconst [c])) 36265 // cond: c&31 >= 16 36266 // result: (MOVLconst [0]) 36267 for { 36268 _ = v.Args[1] 36269 v_1 := v.Args[1] 36270 if v_1.Op != OpAMD64MOVQconst { 36271 break 36272 } 36273 c := v_1.AuxInt 36274 if !(c&31 >= 16) { 36275 break 36276 } 36277 v.reset(OpAMD64MOVLconst) 36278 v.AuxInt = 0 36279 return true 36280 } 36281 // match: (SHRW _ (MOVLconst [c])) 36282 // cond: c&31 >= 16 36283 // result: (MOVLconst [0]) 36284 for { 36285 _ = v.Args[1] 36286 v_1 := v.Args[1] 36287 if v_1.Op != OpAMD64MOVLconst { 36288 break 36289 } 36290 c := v_1.AuxInt 36291 if !(c&31 >= 16) { 36292 break 36293 } 36294 v.reset(OpAMD64MOVLconst) 36295 v.AuxInt = 0 36296 return true 36297 } 36298 return false 36299 } 36300 func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { 36301 // match: (SHRWconst x [0]) 36302 // cond: 36303 // result: x 36304 for { 36305 if v.AuxInt != 0 { 36306 break 36307 } 36308 x := v.Args[0] 36309 v.reset(OpCopy) 36310 v.Type = x.Type 36311 v.AddArg(x) 36312 return true 36313 } 36314 return false 36315 } 36316 func 
rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { 36317 b := v.Block 36318 _ = b 36319 // match: (SUBL x (MOVLconst [c])) 36320 // cond: 36321 // result: (SUBLconst x [c]) 36322 for { 36323 _ = v.Args[1] 36324 x := v.Args[0] 36325 v_1 := v.Args[1] 36326 if v_1.Op != OpAMD64MOVLconst { 36327 break 36328 } 36329 c := v_1.AuxInt 36330 v.reset(OpAMD64SUBLconst) 36331 v.AuxInt = c 36332 v.AddArg(x) 36333 return true 36334 } 36335 // match: (SUBL (MOVLconst [c]) x) 36336 // cond: 36337 // result: (NEGL (SUBLconst <v.Type> x [c])) 36338 for { 36339 _ = v.Args[1] 36340 v_0 := v.Args[0] 36341 if v_0.Op != OpAMD64MOVLconst { 36342 break 36343 } 36344 c := v_0.AuxInt 36345 x := v.Args[1] 36346 v.reset(OpAMD64NEGL) 36347 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 36348 v0.AuxInt = c 36349 v0.AddArg(x) 36350 v.AddArg(v0) 36351 return true 36352 } 36353 // match: (SUBL x x) 36354 // cond: 36355 // result: (MOVLconst [0]) 36356 for { 36357 _ = v.Args[1] 36358 x := v.Args[0] 36359 if x != v.Args[1] { 36360 break 36361 } 36362 v.reset(OpAMD64MOVLconst) 36363 v.AuxInt = 0 36364 return true 36365 } 36366 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) 36367 // cond: canMergeLoad(v, l, x) && clobber(l) 36368 // result: (SUBLmem x [off] {sym} ptr mem) 36369 for { 36370 _ = v.Args[1] 36371 x := v.Args[0] 36372 l := v.Args[1] 36373 if l.Op != OpAMD64MOVLload { 36374 break 36375 } 36376 off := l.AuxInt 36377 sym := l.Aux 36378 _ = l.Args[1] 36379 ptr := l.Args[0] 36380 mem := l.Args[1] 36381 if !(canMergeLoad(v, l, x) && clobber(l)) { 36382 break 36383 } 36384 v.reset(OpAMD64SUBLmem) 36385 v.AuxInt = off 36386 v.Aux = sym 36387 v.AddArg(x) 36388 v.AddArg(ptr) 36389 v.AddArg(mem) 36390 return true 36391 } 36392 return false 36393 } 36394 func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { 36395 // match: (SUBLconst [c] x) 36396 // cond: int32(c) == 0 36397 // result: x 36398 for { 36399 c := v.AuxInt 36400 x := v.Args[0] 36401 if !(int32(c) == 0) { 36402 break 36403 } 36404 v.reset(OpCopy) 36405 v.Type = x.Type 36406 v.AddArg(x) 36407 return true 36408 } 36409 // match: (SUBLconst [c] x) 36410 // cond: 36411 // result: (ADDLconst [int64(int32(-c))] x) 36412 for { 36413 c := v.AuxInt 36414 x := v.Args[0] 36415 v.reset(OpAMD64ADDLconst) 36416 v.AuxInt = int64(int32(-c)) 36417 v.AddArg(x) 36418 return true 36419 } 36420 } 36421 func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { 36422 b := v.Block 36423 _ = b 36424 // match: (SUBQ x (MOVQconst [c])) 36425 // cond: is32Bit(c) 36426 // result: (SUBQconst x [c]) 36427 for { 36428 _ = v.Args[1] 36429 x := v.Args[0] 36430 v_1 := v.Args[1] 36431 if v_1.Op != OpAMD64MOVQconst { 36432 break 36433 } 36434 c := v_1.AuxInt 36435 if !(is32Bit(c)) { 36436 break 36437 } 36438 v.reset(OpAMD64SUBQconst) 36439 v.AuxInt = c 36440 v.AddArg(x) 36441 return true 36442 } 36443 // match: (SUBQ (MOVQconst [c]) x) 36444 // cond: is32Bit(c) 36445 // result: (NEGQ (SUBQconst <v.Type> x [c])) 36446 for { 36447 _ = v.Args[1] 36448 v_0 := v.Args[0] 36449 if v_0.Op != OpAMD64MOVQconst { 36450 break 36451 } 36452 c := v_0.AuxInt 36453 x := v.Args[1] 36454 if !(is32Bit(c)) { 36455 break 36456 } 36457 v.reset(OpAMD64NEGQ) 36458 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 36459 v0.AuxInt = c 36460 v0.AddArg(x) 36461 v.AddArg(v0) 36462 return true 36463 } 36464 // match: (SUBQ x x) 36465 // cond: 36466 // result: (MOVQconst [0]) 36467 for { 36468 _ = v.Args[1] 36469 x := v.Args[0] 36470 if x != v.Args[1] { 36471 break 36472 } 36473 v.reset(OpAMD64MOVQconst) 36474 v.AuxInt = 0 
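// Besides the constant and x-x simplifications, the rule just below
// folds a memory operand into the subtraction: when the second argument
// is a MOVQload that canMergeLoad judges safe to absorb (no interfering
// memory operations between the load and this use), SUBQ plus load
// collapse into SUBQmem and clobber marks the load dead. Schematic shape
// of the rewrite (SSA notation, not runnable Go):
//
//	// l := (MOVQload [off] {sym} ptr mem)
//	// (SUBQ x l)  =>  (SUBQmem x [off] {sym} ptr mem)
//
// The payoff is one fewer instruction and no register tied up by l; the
// SUBSD/SUBSS rules after this apply the same folding to float loads.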
36475 return true 36476 } 36477 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) 36478 // cond: canMergeLoad(v, l, x) && clobber(l) 36479 // result: (SUBQmem x [off] {sym} ptr mem) 36480 for { 36481 _ = v.Args[1] 36482 x := v.Args[0] 36483 l := v.Args[1] 36484 if l.Op != OpAMD64MOVQload { 36485 break 36486 } 36487 off := l.AuxInt 36488 sym := l.Aux 36489 _ = l.Args[1] 36490 ptr := l.Args[0] 36491 mem := l.Args[1] 36492 if !(canMergeLoad(v, l, x) && clobber(l)) { 36493 break 36494 } 36495 v.reset(OpAMD64SUBQmem) 36496 v.AuxInt = off 36497 v.Aux = sym 36498 v.AddArg(x) 36499 v.AddArg(ptr) 36500 v.AddArg(mem) 36501 return true 36502 } 36503 return false 36504 } 36505 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { 36506 // match: (SUBQconst [0] x) 36507 // cond: 36508 // result: x 36509 for { 36510 if v.AuxInt != 0 { 36511 break 36512 } 36513 x := v.Args[0] 36514 v.reset(OpCopy) 36515 v.Type = x.Type 36516 v.AddArg(x) 36517 return true 36518 } 36519 // match: (SUBQconst [c] x) 36520 // cond: c != -(1<<31) 36521 // result: (ADDQconst [-c] x) 36522 for { 36523 c := v.AuxInt 36524 x := v.Args[0] 36525 if !(c != -(1 << 31)) { 36526 break 36527 } 36528 v.reset(OpAMD64ADDQconst) 36529 v.AuxInt = -c 36530 v.AddArg(x) 36531 return true 36532 } 36533 // match: (SUBQconst (MOVQconst [d]) [c]) 36534 // cond: 36535 // result: (MOVQconst [d-c]) 36536 for { 36537 c := v.AuxInt 36538 v_0 := v.Args[0] 36539 if v_0.Op != OpAMD64MOVQconst { 36540 break 36541 } 36542 d := v_0.AuxInt 36543 v.reset(OpAMD64MOVQconst) 36544 v.AuxInt = d - c 36545 return true 36546 } 36547 // match: (SUBQconst (SUBQconst x [d]) [c]) 36548 // cond: is32Bit(-c-d) 36549 // result: (ADDQconst [-c-d] x) 36550 for { 36551 c := v.AuxInt 36552 v_0 := v.Args[0] 36553 if v_0.Op != OpAMD64SUBQconst { 36554 break 36555 } 36556 d := v_0.AuxInt 36557 x := v_0.Args[0] 36558 if !(is32Bit(-c - d)) { 36559 break 36560 } 36561 v.reset(OpAMD64ADDQconst) 36562 v.AuxInt = -c - d 36563 v.AddArg(x) 36564 return true 36565 } 36566 return false 36567 } 36568 func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { 36569 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) 36570 // cond: canMergeLoad(v, l, x) && clobber(l) 36571 // result: (SUBSDmem x [off] {sym} ptr mem) 36572 for { 36573 _ = v.Args[1] 36574 x := v.Args[0] 36575 l := v.Args[1] 36576 if l.Op != OpAMD64MOVSDload { 36577 break 36578 } 36579 off := l.AuxInt 36580 sym := l.Aux 36581 _ = l.Args[1] 36582 ptr := l.Args[0] 36583 mem := l.Args[1] 36584 if !(canMergeLoad(v, l, x) && clobber(l)) { 36585 break 36586 } 36587 v.reset(OpAMD64SUBSDmem) 36588 v.AuxInt = off 36589 v.Aux = sym 36590 v.AddArg(x) 36591 v.AddArg(ptr) 36592 v.AddArg(mem) 36593 return true 36594 } 36595 return false 36596 } 36597 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { 36598 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) 36599 // cond: canMergeLoad(v, l, x) && clobber(l) 36600 // result: (SUBSSmem x [off] {sym} ptr mem) 36601 for { 36602 _ = v.Args[1] 36603 x := v.Args[0] 36604 l := v.Args[1] 36605 if l.Op != OpAMD64MOVSSload { 36606 break 36607 } 36608 off := l.AuxInt 36609 sym := l.Aux 36610 _ = l.Args[1] 36611 ptr := l.Args[0] 36612 mem := l.Args[1] 36613 if !(canMergeLoad(v, l, x) && clobber(l)) { 36614 break 36615 } 36616 v.reset(OpAMD64SUBSSmem) 36617 v.AuxInt = off 36618 v.Aux = sym 36619 v.AddArg(x) 36620 v.AddArg(ptr) 36621 v.AddArg(mem) 36622 return true 36623 } 36624 return false 36625 } 36626 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { 36627 // match: (TESTB (MOVLconst [c]) 
x) 36628 // cond: 36629 // result: (TESTBconst [c] x) 36630 for { 36631 _ = v.Args[1] 36632 v_0 := v.Args[0] 36633 if v_0.Op != OpAMD64MOVLconst { 36634 break 36635 } 36636 c := v_0.AuxInt 36637 x := v.Args[1] 36638 v.reset(OpAMD64TESTBconst) 36639 v.AuxInt = c 36640 v.AddArg(x) 36641 return true 36642 } 36643 // match: (TESTB x (MOVLconst [c])) 36644 // cond: 36645 // result: (TESTBconst [c] x) 36646 for { 36647 _ = v.Args[1] 36648 x := v.Args[0] 36649 v_1 := v.Args[1] 36650 if v_1.Op != OpAMD64MOVLconst { 36651 break 36652 } 36653 c := v_1.AuxInt 36654 v.reset(OpAMD64TESTBconst) 36655 v.AuxInt = c 36656 v.AddArg(x) 36657 return true 36658 } 36659 return false 36660 } 36661 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { 36662 // match: (TESTL (MOVLconst [c]) x) 36663 // cond: 36664 // result: (TESTLconst [c] x) 36665 for { 36666 _ = v.Args[1] 36667 v_0 := v.Args[0] 36668 if v_0.Op != OpAMD64MOVLconst { 36669 break 36670 } 36671 c := v_0.AuxInt 36672 x := v.Args[1] 36673 v.reset(OpAMD64TESTLconst) 36674 v.AuxInt = c 36675 v.AddArg(x) 36676 return true 36677 } 36678 // match: (TESTL x (MOVLconst [c])) 36679 // cond: 36680 // result: (TESTLconst [c] x) 36681 for { 36682 _ = v.Args[1] 36683 x := v.Args[0] 36684 v_1 := v.Args[1] 36685 if v_1.Op != OpAMD64MOVLconst { 36686 break 36687 } 36688 c := v_1.AuxInt 36689 v.reset(OpAMD64TESTLconst) 36690 v.AuxInt = c 36691 v.AddArg(x) 36692 return true 36693 } 36694 return false 36695 } 36696 func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { 36697 // match: (TESTQ (MOVQconst [c]) x) 36698 // cond: is32Bit(c) 36699 // result: (TESTQconst [c] x) 36700 for { 36701 _ = v.Args[1] 36702 v_0 := v.Args[0] 36703 if v_0.Op != OpAMD64MOVQconst { 36704 break 36705 } 36706 c := v_0.AuxInt 36707 x := v.Args[1] 36708 if !(is32Bit(c)) { 36709 break 36710 } 36711 v.reset(OpAMD64TESTQconst) 36712 v.AuxInt = c 36713 v.AddArg(x) 36714 return true 36715 } 36716 // match: (TESTQ x (MOVQconst [c])) 36717 // cond: is32Bit(c) 36718 // result: (TESTQconst [c] x) 36719 for { 36720 _ = v.Args[1] 36721 x := v.Args[0] 36722 v_1 := v.Args[1] 36723 if v_1.Op != OpAMD64MOVQconst { 36724 break 36725 } 36726 c := v_1.AuxInt 36727 if !(is32Bit(c)) { 36728 break 36729 } 36730 v.reset(OpAMD64TESTQconst) 36731 v.AuxInt = c 36732 v.AddArg(x) 36733 return true 36734 } 36735 return false 36736 } 36737 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { 36738 // match: (TESTW (MOVLconst [c]) x) 36739 // cond: 36740 // result: (TESTWconst [c] x) 36741 for { 36742 _ = v.Args[1] 36743 v_0 := v.Args[0] 36744 if v_0.Op != OpAMD64MOVLconst { 36745 break 36746 } 36747 c := v_0.AuxInt 36748 x := v.Args[1] 36749 v.reset(OpAMD64TESTWconst) 36750 v.AuxInt = c 36751 v.AddArg(x) 36752 return true 36753 } 36754 // match: (TESTW x (MOVLconst [c])) 36755 // cond: 36756 // result: (TESTWconst [c] x) 36757 for { 36758 _ = v.Args[1] 36759 x := v.Args[0] 36760 v_1 := v.Args[1] 36761 if v_1.Op != OpAMD64MOVLconst { 36762 break 36763 } 36764 c := v_1.AuxInt 36765 v.reset(OpAMD64TESTWconst) 36766 v.AuxInt = c 36767 v.AddArg(x) 36768 return true 36769 } 36770 return false 36771 } 36772 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { 36773 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 36774 // cond: is32Bit(off1+off2) 36775 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 36776 for { 36777 off1 := v.AuxInt 36778 sym := v.Aux 36779 _ = v.Args[2] 36780 val := v.Args[0] 36781 v_1 := v.Args[1] 36782 if v_1.Op != OpAMD64ADDQconst { 36783 break 36784 } 36785 off2 := 
v_1.AuxInt 36786 ptr := v_1.Args[0] 36787 mem := v.Args[2] 36788 if !(is32Bit(off1 + off2)) { 36789 break 36790 } 36791 v.reset(OpAMD64XADDLlock) 36792 v.AuxInt = off1 + off2 36793 v.Aux = sym 36794 v.AddArg(val) 36795 v.AddArg(ptr) 36796 v.AddArg(mem) 36797 return true 36798 } 36799 return false 36800 } 36801 func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { 36802 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 36803 // cond: is32Bit(off1+off2) 36804 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 36805 for { 36806 off1 := v.AuxInt 36807 sym := v.Aux 36808 _ = v.Args[2] 36809 val := v.Args[0] 36810 v_1 := v.Args[1] 36811 if v_1.Op != OpAMD64ADDQconst { 36812 break 36813 } 36814 off2 := v_1.AuxInt 36815 ptr := v_1.Args[0] 36816 mem := v.Args[2] 36817 if !(is32Bit(off1 + off2)) { 36818 break 36819 } 36820 v.reset(OpAMD64XADDQlock) 36821 v.AuxInt = off1 + off2 36822 v.Aux = sym 36823 v.AddArg(val) 36824 v.AddArg(ptr) 36825 v.AddArg(mem) 36826 return true 36827 } 36828 return false 36829 } 36830 func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { 36831 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 36832 // cond: is32Bit(off1+off2) 36833 // result: (XCHGL [off1+off2] {sym} val ptr mem) 36834 for { 36835 off1 := v.AuxInt 36836 sym := v.Aux 36837 _ = v.Args[2] 36838 val := v.Args[0] 36839 v_1 := v.Args[1] 36840 if v_1.Op != OpAMD64ADDQconst { 36841 break 36842 } 36843 off2 := v_1.AuxInt 36844 ptr := v_1.Args[0] 36845 mem := v.Args[2] 36846 if !(is32Bit(off1 + off2)) { 36847 break 36848 } 36849 v.reset(OpAMD64XCHGL) 36850 v.AuxInt = off1 + off2 36851 v.Aux = sym 36852 v.AddArg(val) 36853 v.AddArg(ptr) 36854 v.AddArg(mem) 36855 return true 36856 } 36857 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 36858 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 36859 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 36860 for { 36861 off1 := v.AuxInt 36862 sym1 := v.Aux 36863 _ = v.Args[2] 36864 val := v.Args[0] 36865 v_1 := v.Args[1] 36866 if v_1.Op != OpAMD64LEAQ { 36867 break 36868 } 36869 off2 := v_1.AuxInt 36870 sym2 := v_1.Aux 36871 ptr := v_1.Args[0] 36872 mem := v.Args[2] 36873 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 36874 break 36875 } 36876 v.reset(OpAMD64XCHGL) 36877 v.AuxInt = off1 + off2 36878 v.Aux = mergeSym(sym1, sym2) 36879 v.AddArg(val) 36880 v.AddArg(ptr) 36881 v.AddArg(mem) 36882 return true 36883 } 36884 return false 36885 } 36886 func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { 36887 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 36888 // cond: is32Bit(off1+off2) 36889 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 36890 for { 36891 off1 := v.AuxInt 36892 sym := v.Aux 36893 _ = v.Args[2] 36894 val := v.Args[0] 36895 v_1 := v.Args[1] 36896 if v_1.Op != OpAMD64ADDQconst { 36897 break 36898 } 36899 off2 := v_1.AuxInt 36900 ptr := v_1.Args[0] 36901 mem := v.Args[2] 36902 if !(is32Bit(off1 + off2)) { 36903 break 36904 } 36905 v.reset(OpAMD64XCHGQ) 36906 v.AuxInt = off1 + off2 36907 v.Aux = sym 36908 v.AddArg(val) 36909 v.AddArg(ptr) 36910 v.AddArg(mem) 36911 return true 36912 } 36913 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 36914 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 36915 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 36916 for { 36917 off1 := v.AuxInt 36918 sym1 := v.Aux 36919 _ = v.Args[2] 36920 val := v.Args[0] 36921 v_1 := v.Args[1] 36922 if v_1.Op 
!= OpAMD64LEAQ { 36923 break 36924 } 36925 off2 := v_1.AuxInt 36926 sym2 := v_1.Aux 36927 ptr := v_1.Args[0] 36928 mem := v.Args[2] 36929 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 36930 break 36931 } 36932 v.reset(OpAMD64XCHGQ) 36933 v.AuxInt = off1 + off2 36934 v.Aux = mergeSym(sym1, sym2) 36935 v.AddArg(val) 36936 v.AddArg(ptr) 36937 v.AddArg(mem) 36938 return true 36939 } 36940 return false 36941 } 36942 func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { 36943 // match: (XORL x (MOVLconst [c])) 36944 // cond: 36945 // result: (XORLconst [c] x) 36946 for { 36947 _ = v.Args[1] 36948 x := v.Args[0] 36949 v_1 := v.Args[1] 36950 if v_1.Op != OpAMD64MOVLconst { 36951 break 36952 } 36953 c := v_1.AuxInt 36954 v.reset(OpAMD64XORLconst) 36955 v.AuxInt = c 36956 v.AddArg(x) 36957 return true 36958 } 36959 // match: (XORL (MOVLconst [c]) x) 36960 // cond: 36961 // result: (XORLconst [c] x) 36962 for { 36963 _ = v.Args[1] 36964 v_0 := v.Args[0] 36965 if v_0.Op != OpAMD64MOVLconst { 36966 break 36967 } 36968 c := v_0.AuxInt 36969 x := v.Args[1] 36970 v.reset(OpAMD64XORLconst) 36971 v.AuxInt = c 36972 v.AddArg(x) 36973 return true 36974 } 36975 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) 36976 // cond: d==32-c 36977 // result: (ROLLconst x [c]) 36978 for { 36979 _ = v.Args[1] 36980 v_0 := v.Args[0] 36981 if v_0.Op != OpAMD64SHLLconst { 36982 break 36983 } 36984 c := v_0.AuxInt 36985 x := v_0.Args[0] 36986 v_1 := v.Args[1] 36987 if v_1.Op != OpAMD64SHRLconst { 36988 break 36989 } 36990 d := v_1.AuxInt 36991 if x != v_1.Args[0] { 36992 break 36993 } 36994 if !(d == 32-c) { 36995 break 36996 } 36997 v.reset(OpAMD64ROLLconst) 36998 v.AuxInt = c 36999 v.AddArg(x) 37000 return true 37001 } 37002 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) 37003 // cond: d==32-c 37004 // result: (ROLLconst x [c]) 37005 for { 37006 _ = v.Args[1] 37007 v_0 := v.Args[0] 37008 if v_0.Op != OpAMD64SHRLconst { 37009 break 37010 } 37011 d := v_0.AuxInt 37012 x := v_0.Args[0] 37013 v_1 := v.Args[1] 37014 if v_1.Op != OpAMD64SHLLconst { 37015 break 37016 } 37017 c := v_1.AuxInt 37018 if x != v_1.Args[0] { 37019 break 37020 } 37021 if !(d == 32-c) { 37022 break 37023 } 37024 v.reset(OpAMD64ROLLconst) 37025 v.AuxInt = c 37026 v.AddArg(x) 37027 return true 37028 } 37029 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 37030 // cond: d==16-c && c < 16 && t.Size() == 2 37031 // result: (ROLWconst x [c]) 37032 for { 37033 t := v.Type 37034 _ = v.Args[1] 37035 v_0 := v.Args[0] 37036 if v_0.Op != OpAMD64SHLLconst { 37037 break 37038 } 37039 c := v_0.AuxInt 37040 x := v_0.Args[0] 37041 v_1 := v.Args[1] 37042 if v_1.Op != OpAMD64SHRWconst { 37043 break 37044 } 37045 d := v_1.AuxInt 37046 if x != v_1.Args[0] { 37047 break 37048 } 37049 if !(d == 16-c && c < 16 && t.Size() == 2) { 37050 break 37051 } 37052 v.reset(OpAMD64ROLWconst) 37053 v.AuxInt = c 37054 v.AddArg(x) 37055 return true 37056 } 37057 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 37058 // cond: d==16-c && c < 16 && t.Size() == 2 37059 // result: (ROLWconst x [c]) 37060 for { 37061 t := v.Type 37062 _ = v.Args[1] 37063 v_0 := v.Args[0] 37064 if v_0.Op != OpAMD64SHRWconst { 37065 break 37066 } 37067 d := v_0.AuxInt 37068 x := v_0.Args[0] 37069 v_1 := v.Args[1] 37070 if v_1.Op != OpAMD64SHLLconst { 37071 break 37072 } 37073 c := v_1.AuxInt 37074 if x != v_1.Args[0] { 37075 break 37076 } 37077 if !(d == 16-c && c < 16 && t.Size() == 2) { 37078 break 37079 } 37080 v.reset(OpAMD64ROLWconst) 37081 v.AuxInt = c 37082 
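// The XORL rotate rules pair a left shift by c with a right shift by
// width-c of the same value; the two halves occupy disjoint bit
// positions, so XOR (like OR) reassembles the rotate exactly. Plain-Go
// sketch of the 32-bit identity (illustrative only):
//
//	func rol32(x uint32, c uint) uint32 { // assumes 0 < c < 32
//		return x<<c ^ x>>(32-c) // identical to x<<c | x>>(32-c)
//	}
//
// The 16- and 8-bit variants also require the value's type size to match
// (t.Size() == 2 or 1) so that the truncating SHRW/SHRB is sound.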
v.AddArg(x) 37083 return true 37084 } 37085 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 37086 // cond: d==8-c && c < 8 && t.Size() == 1 37087 // result: (ROLBconst x [c]) 37088 for { 37089 t := v.Type 37090 _ = v.Args[1] 37091 v_0 := v.Args[0] 37092 if v_0.Op != OpAMD64SHLLconst { 37093 break 37094 } 37095 c := v_0.AuxInt 37096 x := v_0.Args[0] 37097 v_1 := v.Args[1] 37098 if v_1.Op != OpAMD64SHRBconst { 37099 break 37100 } 37101 d := v_1.AuxInt 37102 if x != v_1.Args[0] { 37103 break 37104 } 37105 if !(d == 8-c && c < 8 && t.Size() == 1) { 37106 break 37107 } 37108 v.reset(OpAMD64ROLBconst) 37109 v.AuxInt = c 37110 v.AddArg(x) 37111 return true 37112 } 37113 // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 37114 // cond: d==8-c && c < 8 && t.Size() == 1 37115 // result: (ROLBconst x [c]) 37116 for { 37117 t := v.Type 37118 _ = v.Args[1] 37119 v_0 := v.Args[0] 37120 if v_0.Op != OpAMD64SHRBconst { 37121 break 37122 } 37123 d := v_0.AuxInt 37124 x := v_0.Args[0] 37125 v_1 := v.Args[1] 37126 if v_1.Op != OpAMD64SHLLconst { 37127 break 37128 } 37129 c := v_1.AuxInt 37130 if x != v_1.Args[0] { 37131 break 37132 } 37133 if !(d == 8-c && c < 8 && t.Size() == 1) { 37134 break 37135 } 37136 v.reset(OpAMD64ROLBconst) 37137 v.AuxInt = c 37138 v.AddArg(x) 37139 return true 37140 } 37141 // match: (XORL x x) 37142 // cond: 37143 // result: (MOVLconst [0]) 37144 for { 37145 _ = v.Args[1] 37146 x := v.Args[0] 37147 if x != v.Args[1] { 37148 break 37149 } 37150 v.reset(OpAMD64MOVLconst) 37151 v.AuxInt = 0 37152 return true 37153 } 37154 // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) 37155 // cond: canMergeLoad(v, l, x) && clobber(l) 37156 // result: (XORLmem x [off] {sym} ptr mem) 37157 for { 37158 _ = v.Args[1] 37159 x := v.Args[0] 37160 l := v.Args[1] 37161 if l.Op != OpAMD64MOVLload { 37162 break 37163 } 37164 off := l.AuxInt 37165 sym := l.Aux 37166 _ = l.Args[1] 37167 ptr := l.Args[0] 37168 mem := l.Args[1] 37169 if !(canMergeLoad(v, l, x) && clobber(l)) { 37170 break 37171 } 37172 v.reset(OpAMD64XORLmem) 37173 v.AuxInt = off 37174 v.Aux = sym 37175 v.AddArg(x) 37176 v.AddArg(ptr) 37177 v.AddArg(mem) 37178 return true 37179 } 37180 return false 37181 } 37182 func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { 37183 // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) 37184 // cond: canMergeLoad(v, l, x) && clobber(l) 37185 // result: (XORLmem x [off] {sym} ptr mem) 37186 for { 37187 _ = v.Args[1] 37188 l := v.Args[0] 37189 if l.Op != OpAMD64MOVLload { 37190 break 37191 } 37192 off := l.AuxInt 37193 sym := l.Aux 37194 _ = l.Args[1] 37195 ptr := l.Args[0] 37196 mem := l.Args[1] 37197 x := v.Args[1] 37198 if !(canMergeLoad(v, l, x) && clobber(l)) { 37199 break 37200 } 37201 v.reset(OpAMD64XORLmem) 37202 v.AuxInt = off 37203 v.Aux = sym 37204 v.AddArg(x) 37205 v.AddArg(ptr) 37206 v.AddArg(mem) 37207 return true 37208 } 37209 return false 37210 } 37211 func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { 37212 // match: (XORLconst [1] (SETNE x)) 37213 // cond: 37214 // result: (SETEQ x) 37215 for { 37216 if v.AuxInt != 1 { 37217 break 37218 } 37219 v_0 := v.Args[0] 37220 if v_0.Op != OpAMD64SETNE { 37221 break 37222 } 37223 x := v_0.Args[0] 37224 v.reset(OpAMD64SETEQ) 37225 v.AddArg(x) 37226 return true 37227 } 37228 // match: (XORLconst [1] (SETEQ x)) 37229 // cond: 37230 // result: (SETNE x) 37231 for { 37232 if v.AuxInt != 1 { 37233 break 37234 } 37235 v_0 := v.Args[0] 37236 if v_0.Op != OpAMD64SETEQ { 37237 break 37238 } 37239 x := v_0.Args[0] 37240 
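// XORing a boolean with 1 negates it, so (XORLconst [1] (SETcc x))
// rewrites to the SETcc of the inverted condition and the XOR vanishes.
// The pairs used by this block of rules:
//
//	// NE <-> EQ,  L <-> GE,  LE <-> G   (signed)
//	// B <-> AE,   BE <-> A              (unsigned)
//
// Only AuxInt 1 qualifies: SETcc produces exactly 0 or 1, and x^1 flips
// exactly that low bit.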
v.reset(OpAMD64SETNE) 37241 v.AddArg(x) 37242 return true 37243 } 37244 // match: (XORLconst [1] (SETL x)) 37245 // cond: 37246 // result: (SETGE x) 37247 for { 37248 if v.AuxInt != 1 { 37249 break 37250 } 37251 v_0 := v.Args[0] 37252 if v_0.Op != OpAMD64SETL { 37253 break 37254 } 37255 x := v_0.Args[0] 37256 v.reset(OpAMD64SETGE) 37257 v.AddArg(x) 37258 return true 37259 } 37260 // match: (XORLconst [1] (SETGE x)) 37261 // cond: 37262 // result: (SETL x) 37263 for { 37264 if v.AuxInt != 1 { 37265 break 37266 } 37267 v_0 := v.Args[0] 37268 if v_0.Op != OpAMD64SETGE { 37269 break 37270 } 37271 x := v_0.Args[0] 37272 v.reset(OpAMD64SETL) 37273 v.AddArg(x) 37274 return true 37275 } 37276 // match: (XORLconst [1] (SETLE x)) 37277 // cond: 37278 // result: (SETG x) 37279 for { 37280 if v.AuxInt != 1 { 37281 break 37282 } 37283 v_0 := v.Args[0] 37284 if v_0.Op != OpAMD64SETLE { 37285 break 37286 } 37287 x := v_0.Args[0] 37288 v.reset(OpAMD64SETG) 37289 v.AddArg(x) 37290 return true 37291 } 37292 // match: (XORLconst [1] (SETG x)) 37293 // cond: 37294 // result: (SETLE x) 37295 for { 37296 if v.AuxInt != 1 { 37297 break 37298 } 37299 v_0 := v.Args[0] 37300 if v_0.Op != OpAMD64SETG { 37301 break 37302 } 37303 x := v_0.Args[0] 37304 v.reset(OpAMD64SETLE) 37305 v.AddArg(x) 37306 return true 37307 } 37308 // match: (XORLconst [1] (SETB x)) 37309 // cond: 37310 // result: (SETAE x) 37311 for { 37312 if v.AuxInt != 1 { 37313 break 37314 } 37315 v_0 := v.Args[0] 37316 if v_0.Op != OpAMD64SETB { 37317 break 37318 } 37319 x := v_0.Args[0] 37320 v.reset(OpAMD64SETAE) 37321 v.AddArg(x) 37322 return true 37323 } 37324 // match: (XORLconst [1] (SETAE x)) 37325 // cond: 37326 // result: (SETB x) 37327 for { 37328 if v.AuxInt != 1 { 37329 break 37330 } 37331 v_0 := v.Args[0] 37332 if v_0.Op != OpAMD64SETAE { 37333 break 37334 } 37335 x := v_0.Args[0] 37336 v.reset(OpAMD64SETB) 37337 v.AddArg(x) 37338 return true 37339 } 37340 // match: (XORLconst [1] (SETBE x)) 37341 // cond: 37342 // result: (SETA x) 37343 for { 37344 if v.AuxInt != 1 { 37345 break 37346 } 37347 v_0 := v.Args[0] 37348 if v_0.Op != OpAMD64SETBE { 37349 break 37350 } 37351 x := v_0.Args[0] 37352 v.reset(OpAMD64SETA) 37353 v.AddArg(x) 37354 return true 37355 } 37356 // match: (XORLconst [1] (SETA x)) 37357 // cond: 37358 // result: (SETBE x) 37359 for { 37360 if v.AuxInt != 1 { 37361 break 37362 } 37363 v_0 := v.Args[0] 37364 if v_0.Op != OpAMD64SETA { 37365 break 37366 } 37367 x := v_0.Args[0] 37368 v.reset(OpAMD64SETBE) 37369 v.AddArg(x) 37370 return true 37371 } 37372 return false 37373 } 37374 func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { 37375 // match: (XORLconst [c] (XORLconst [d] x)) 37376 // cond: 37377 // result: (XORLconst [c ^ d] x) 37378 for { 37379 c := v.AuxInt 37380 v_0 := v.Args[0] 37381 if v_0.Op != OpAMD64XORLconst { 37382 break 37383 } 37384 d := v_0.AuxInt 37385 x := v_0.Args[0] 37386 v.reset(OpAMD64XORLconst) 37387 v.AuxInt = c ^ d 37388 v.AddArg(x) 37389 return true 37390 } 37391 // match: (XORLconst [c] x) 37392 // cond: int32(c)==0 37393 // result: x 37394 for { 37395 c := v.AuxInt 37396 x := v.Args[0] 37397 if !(int32(c) == 0) { 37398 break 37399 } 37400 v.reset(OpCopy) 37401 v.Type = x.Type 37402 v.AddArg(x) 37403 return true 37404 } 37405 // match: (XORLconst [c] (MOVLconst [d])) 37406 // cond: 37407 // result: (MOVLconst [c^d]) 37408 for { 37409 c := v.AuxInt 37410 v_0 := v.Args[0] 37411 if v_0.Op != OpAMD64MOVLconst { 37412 break 37413 } 37414 d := v_0.AuxInt 37415 v.reset(OpAMD64MOVLconst) 37416 
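// Constant material folds away completely here: nested XORLconst combine
// to c^d, a constant whose low 32 bits are zero (int32(c) == 0) is the
// identity and yields x, and XOR of two constants is evaluated at
// compile time. Worked example (illustrative values):
//
//	// (XORLconst [0xF0] (MOVLconst [0xFF]))  =>  (MOVLconst [0x0F])
//
// The XORQconst rules that follow mirror all three cases for 64 bits.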
v.AuxInt = c ^ d 37417 return true 37418 } 37419 return false 37420 } 37421 func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { 37422 // match: (XORQ x (MOVQconst [c])) 37423 // cond: is32Bit(c) 37424 // result: (XORQconst [c] x) 37425 for { 37426 _ = v.Args[1] 37427 x := v.Args[0] 37428 v_1 := v.Args[1] 37429 if v_1.Op != OpAMD64MOVQconst { 37430 break 37431 } 37432 c := v_1.AuxInt 37433 if !(is32Bit(c)) { 37434 break 37435 } 37436 v.reset(OpAMD64XORQconst) 37437 v.AuxInt = c 37438 v.AddArg(x) 37439 return true 37440 } 37441 // match: (XORQ (MOVQconst [c]) x) 37442 // cond: is32Bit(c) 37443 // result: (XORQconst [c] x) 37444 for { 37445 _ = v.Args[1] 37446 v_0 := v.Args[0] 37447 if v_0.Op != OpAMD64MOVQconst { 37448 break 37449 } 37450 c := v_0.AuxInt 37451 x := v.Args[1] 37452 if !(is32Bit(c)) { 37453 break 37454 } 37455 v.reset(OpAMD64XORQconst) 37456 v.AuxInt = c 37457 v.AddArg(x) 37458 return true 37459 } 37460 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) 37461 // cond: d==64-c 37462 // result: (ROLQconst x [c]) 37463 for { 37464 _ = v.Args[1] 37465 v_0 := v.Args[0] 37466 if v_0.Op != OpAMD64SHLQconst { 37467 break 37468 } 37469 c := v_0.AuxInt 37470 x := v_0.Args[0] 37471 v_1 := v.Args[1] 37472 if v_1.Op != OpAMD64SHRQconst { 37473 break 37474 } 37475 d := v_1.AuxInt 37476 if x != v_1.Args[0] { 37477 break 37478 } 37479 if !(d == 64-c) { 37480 break 37481 } 37482 v.reset(OpAMD64ROLQconst) 37483 v.AuxInt = c 37484 v.AddArg(x) 37485 return true 37486 } 37487 // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) 37488 // cond: d==64-c 37489 // result: (ROLQconst x [c]) 37490 for { 37491 _ = v.Args[1] 37492 v_0 := v.Args[0] 37493 if v_0.Op != OpAMD64SHRQconst { 37494 break 37495 } 37496 d := v_0.AuxInt 37497 x := v_0.Args[0] 37498 v_1 := v.Args[1] 37499 if v_1.Op != OpAMD64SHLQconst { 37500 break 37501 } 37502 c := v_1.AuxInt 37503 if x != v_1.Args[0] { 37504 break 37505 } 37506 if !(d == 64-c) { 37507 break 37508 } 37509 v.reset(OpAMD64ROLQconst) 37510 v.AuxInt = c 37511 v.AddArg(x) 37512 return true 37513 } 37514 // match: (XORQ x x) 37515 // cond: 37516 // result: (MOVQconst [0]) 37517 for { 37518 _ = v.Args[1] 37519 x := v.Args[0] 37520 if x != v.Args[1] { 37521 break 37522 } 37523 v.reset(OpAMD64MOVQconst) 37524 v.AuxInt = 0 37525 return true 37526 } 37527 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) 37528 // cond: canMergeLoad(v, l, x) && clobber(l) 37529 // result: (XORQmem x [off] {sym} ptr mem) 37530 for { 37531 _ = v.Args[1] 37532 x := v.Args[0] 37533 l := v.Args[1] 37534 if l.Op != OpAMD64MOVQload { 37535 break 37536 } 37537 off := l.AuxInt 37538 sym := l.Aux 37539 _ = l.Args[1] 37540 ptr := l.Args[0] 37541 mem := l.Args[1] 37542 if !(canMergeLoad(v, l, x) && clobber(l)) { 37543 break 37544 } 37545 v.reset(OpAMD64XORQmem) 37546 v.AuxInt = off 37547 v.Aux = sym 37548 v.AddArg(x) 37549 v.AddArg(ptr) 37550 v.AddArg(mem) 37551 return true 37552 } 37553 // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) 37554 // cond: canMergeLoad(v, l, x) && clobber(l) 37555 // result: (XORQmem x [off] {sym} ptr mem) 37556 for { 37557 _ = v.Args[1] 37558 l := v.Args[0] 37559 if l.Op != OpAMD64MOVQload { 37560 break 37561 } 37562 off := l.AuxInt 37563 sym := l.Aux 37564 _ = l.Args[1] 37565 ptr := l.Args[0] 37566 mem := l.Args[1] 37567 x := v.Args[1] 37568 if !(canMergeLoad(v, l, x) && clobber(l)) { 37569 break 37570 } 37571 v.reset(OpAMD64XORQmem) 37572 v.AuxInt = off 37573 v.Aux = sym 37574 v.AddArg(x) 37575 v.AddArg(ptr) 37576 v.AddArg(mem) 37577 return true 37578 } 37579 return 
false 37580 } 37581 func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { 37582 // match: (XORQconst [c] (XORQconst [d] x)) 37583 // cond: 37584 // result: (XORQconst [c ^ d] x) 37585 for { 37586 c := v.AuxInt 37587 v_0 := v.Args[0] 37588 if v_0.Op != OpAMD64XORQconst { 37589 break 37590 } 37591 d := v_0.AuxInt 37592 x := v_0.Args[0] 37593 v.reset(OpAMD64XORQconst) 37594 v.AuxInt = c ^ d 37595 v.AddArg(x) 37596 return true 37597 } 37598 // match: (XORQconst [0] x) 37599 // cond: 37600 // result: x 37601 for { 37602 if v.AuxInt != 0 { 37603 break 37604 } 37605 x := v.Args[0] 37606 v.reset(OpCopy) 37607 v.Type = x.Type 37608 v.AddArg(x) 37609 return true 37610 } 37611 // match: (XORQconst [c] (MOVQconst [d])) 37612 // cond: 37613 // result: (MOVQconst [c^d]) 37614 for { 37615 c := v.AuxInt 37616 v_0 := v.Args[0] 37617 if v_0.Op != OpAMD64MOVQconst { 37618 break 37619 } 37620 d := v_0.AuxInt 37621 v.reset(OpAMD64MOVQconst) 37622 v.AuxInt = c ^ d 37623 return true 37624 } 37625 return false 37626 } 37627 func rewriteValueAMD64_OpAdd16_0(v *Value) bool { 37628 // match: (Add16 x y) 37629 // cond: 37630 // result: (ADDL x y) 37631 for { 37632 _ = v.Args[1] 37633 x := v.Args[0] 37634 y := v.Args[1] 37635 v.reset(OpAMD64ADDL) 37636 v.AddArg(x) 37637 v.AddArg(y) 37638 return true 37639 } 37640 } 37641 func rewriteValueAMD64_OpAdd32_0(v *Value) bool { 37642 // match: (Add32 x y) 37643 // cond: 37644 // result: (ADDL x y) 37645 for { 37646 _ = v.Args[1] 37647 x := v.Args[0] 37648 y := v.Args[1] 37649 v.reset(OpAMD64ADDL) 37650 v.AddArg(x) 37651 v.AddArg(y) 37652 return true 37653 } 37654 } 37655 func rewriteValueAMD64_OpAdd32F_0(v *Value) bool { 37656 // match: (Add32F x y) 37657 // cond: 37658 // result: (ADDSS x y) 37659 for { 37660 _ = v.Args[1] 37661 x := v.Args[0] 37662 y := v.Args[1] 37663 v.reset(OpAMD64ADDSS) 37664 v.AddArg(x) 37665 v.AddArg(y) 37666 return true 37667 } 37668 } 37669 func rewriteValueAMD64_OpAdd64_0(v *Value) bool { 37670 // match: (Add64 x y) 37671 // cond: 37672 // result: (ADDQ x y) 37673 for { 37674 _ = v.Args[1] 37675 x := v.Args[0] 37676 y := v.Args[1] 37677 v.reset(OpAMD64ADDQ) 37678 v.AddArg(x) 37679 v.AddArg(y) 37680 return true 37681 } 37682 } 37683 func rewriteValueAMD64_OpAdd64F_0(v *Value) bool { 37684 // match: (Add64F x y) 37685 // cond: 37686 // result: (ADDSD x y) 37687 for { 37688 _ = v.Args[1] 37689 x := v.Args[0] 37690 y := v.Args[1] 37691 v.reset(OpAMD64ADDSD) 37692 v.AddArg(x) 37693 v.AddArg(y) 37694 return true 37695 } 37696 } 37697 func rewriteValueAMD64_OpAdd8_0(v *Value) bool { 37698 // match: (Add8 x y) 37699 // cond: 37700 // result: (ADDL x y) 37701 for { 37702 _ = v.Args[1] 37703 x := v.Args[0] 37704 y := v.Args[1] 37705 v.reset(OpAMD64ADDL) 37706 v.AddArg(x) 37707 v.AddArg(y) 37708 return true 37709 } 37710 } 37711 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool { 37712 b := v.Block 37713 _ = b 37714 config := b.Func.Config 37715 _ = config 37716 // match: (AddPtr x y) 37717 // cond: config.PtrSize == 8 37718 // result: (ADDQ x y) 37719 for { 37720 _ = v.Args[1] 37721 x := v.Args[0] 37722 y := v.Args[1] 37723 if !(config.PtrSize == 8) { 37724 break 37725 } 37726 v.reset(OpAMD64ADDQ) 37727 v.AddArg(x) 37728 v.AddArg(y) 37729 return true 37730 } 37731 // match: (AddPtr x y) 37732 // cond: config.PtrSize == 4 37733 // result: (ADDL x y) 37734 for { 37735 _ = v.Args[1] 37736 x := v.Args[0] 37737 y := v.Args[1] 37738 if !(config.PtrSize == 4) { 37739 break 37740 } 37741 v.reset(OpAMD64ADDL) 37742 v.AddArg(x) 37743 v.AddArg(y) 37744 return true 
	}
	return false
}
func rewriteValueAMD64_OpAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
	// match: (And64 x y)
	// cond:
	// result: (ANDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB_0(v *Value) bool {
	// match: (AndB x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd32 ptr val mem)
	// cond:
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst32)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd64 ptr val mem)
	// cond:
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst64)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
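// The AtomicAdd32/AtomicAdd64 lowerings above use the locked XADD
// instructions, which return the value that was in memory *before* the add,
// while Go's atomic adds return the value *after* it. The AddTupleFirst32/64
// wrapper re-adds val to the first (value) element of the XADD result tuple
// to bridge that gap. A model of the fix-up (illustrative helper, not part of
// the generated rules):
func exampleAtomicAddFixup(old, val uint64) uint64 {
	// XADDQlock yields old; AddTupleFirst64 then computes old+val, which is
	// what atomic.AddUint64 is specified to return.
	return old + val
}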
func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
	// match: (AtomicAnd8 ptr val mem)
	// cond:
	// result: (ANDBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ANDBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
	// match: (AtomicExchange32 ptr val mem)
	// cond:
	// result: (XCHGL val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGL)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
	// match: (AtomicExchange64 ptr val mem)
	// cond:
	// result: (XCHGQ val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGQ)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
	// match: (AtomicLoad32 ptr mem)
	// cond:
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
	// match: (AtomicLoad64 ptr mem)
	// cond:
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 8
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 4
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
	// match: (AtomicOr8 ptr val mem)
	// cond:
	// result: (ORBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ORBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore32 ptr val mem)
	// cond:
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore64 ptr val mem)
	// cond:
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 8
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 4
	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
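// Note that the atomic store rules above emit XCHG rather than a plain MOV:
// the implicitly locked exchange gives the store the required sequentially
// consistent ordering, and the surrounding Select1 keeps only the memory half
// of the (value, mem) tuple, discarding the exchanged value. The shape being
// constructed is, schematically:
//
//	v0 = XCHGQ <(UInt64, Mem)> val ptr mem  // locked store; returns a tuple
//	v  = Select1 v0                         // keep just the Mem component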
func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
	// match: (Avg64u x y)
	// cond:
	// result: (AVGQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen32 x)
	// cond:
	// result: (BitLen64 (MOVLQZX <typ.UInt64> x))
	for {
		x := v.Args[0]
		v.reset(OpBitLen64)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen64 <t> x)
	// cond:
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
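// BSRQ leaves its result undefined when the input is zero, which is why the
// BitLen64 rule above wraps it in CMOVQEQ: if BSRQ set the zero flag, the
// CMOV substitutes the constant -1, and the final ADDQconst [1] converts the
// bit index into a length, so a zero input yields 0. A model of the function
// being computed (illustrative only; math/bits.Len64 is the user-level
// equivalent):
func exampleBitLen64Model(x uint64) int {
	n := 0
	for ; x != 0; x >>= 1 {
		n++ // index of the highest set bit, plus one
	}
	return n // BSRQ(x)+1 for x != 0, and (-1)+1 = 0 for x == 0
}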
func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
	// match: (Bswap32 x)
	// cond:
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
	// match: (Bswap64 x)
	// cond:
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
	// match: (ClosureCall [argwid] entry closure mem)
	// cond:
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[2]
		entry := v.Args[0]
		closure := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16_0(v *Value) bool {
	// match: (Com16 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32_0(v *Value) bool {
	// match: (Com32 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64_0(v *Value) bool {
	// match: (Com64 x)
	// cond:
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8_0(v *Value) bool {
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32_0(v *Value) bool {
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
	// match: (Const32F [val])
	// cond:
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64_0(v *Value) bool {
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
	// match: (Const64F [val])
	// cond:
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8_0(v *Value) bool {
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
	// match: (ConstBool [b])
	// cond:
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ConstNil)
	// cond: config.PtrSize == 8
	// result: (MOVQconst [0])
	for {
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ConstNil)
	// cond: config.PtrSize == 4
	// result: (MOVLconst [0])
	for {
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpConvert_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 8
	// result: (MOVQconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 4
	// result: (MOVLconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz32 x)
	// cond:
	// result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = 1 << 32
		v1.AddArg(v2)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
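// Like BSRQ, BSFQ is undefined on a zero input. Ctz64 above guards it with a
// CMOVQEQ that substitutes 64, whereas Ctz32 avoids the CMOV entirely by
// ORing in 1<<32 first: the sentinel bit guarantees BSFQ's input is nonzero,
// and a zero 32-bit input then correctly scans to bit 32. A model of that
// trick (illustrative only, not generated code):
func exampleCtz32Model(x uint32) int {
	v := uint64(x) | 1<<32 // sentinel keeps the BSFQ input nonzero
	n := 0
	for v&1 == 0 {
		v >>= 1
		n++
	}
	return n // 32 when x == 0, otherwise the trailing-zero count of x
}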
func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
	// match: (Cvt32Fto32 x)
	// cond:
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
	// match: (Cvt32Fto64 x)
	// cond:
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
	// match: (Cvt32Fto64F x)
	// cond:
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
	// match: (Cvt32to32F x)
	// cond:
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
	// match: (Cvt32to64F x)
	// cond:
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
	// match: (Cvt64Fto32 x)
	// cond:
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
	// match: (Cvt64Fto32F x)
	// cond:
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
	// match: (Cvt64Fto64 x)
	// cond:
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
	// match: (Cvt64to32F x)
	// cond:
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
	// match: (Cvt64to64F x)
	// cond:
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		_ = v.Args[2]
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16 x y)
	// cond:
	// result: (Select0 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32 x y)
	// cond:
	// result: (Select0 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64 x y)
	// cond:
	// result: (Select0 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
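// There is no 8-bit hardware divide in this lowering: Div8 and Div8u above
// widen both operands to 16 bits (sign- or zero-extending to match the
// signedness) and reuse DIVW/DIVWU, taking Select0, the quotient half, of the
// resulting tuple. The widening is exact, so the 16-bit quotient fits back
// into 8 bits; even the one overflow case, -128 / -1, truncates to -128,
// which is what the Go spec requires. Illustrative model (not generated
// code):
func exampleDiv8Model(x, y int8) int8 {
	q := int16(x) / int16(y) // SignExt8to16 both operands, then DIVW
	return int8(q)           // Select0: truncate the quotient back to 8 bits
}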
func rewriteValueAMD64_OpEq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
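// The comparison lowerings in this stretch are uniform: a CMP of the operand
// width feeds a SETcc, and the condition code encodes the signedness --
// SETL/SETLE/SETG/SETGE for signed comparisons versus SETB/SETBE/SETA/SETAE
// for unsigned ones (Geq32 -> SETGE but Geq32U -> SETAE, and so on). Two
// sources that differ only in signedness (illustrative, not generated code):
func exampleSignedGeq(x, y int32) bool { return x >= y } // SETGE (CMPL x y)

func exampleUnsignedGeq(x, y uint32) bool { return x >= y } // SETAE (CMPL x y)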
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[1]
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETB (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETB (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
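// IsInBounds above checks 0 <= idx < len with a single unsigned comparison:
// SETB (CMPQ idx len) treats a negative idx as a huge unsigned number, so it
// fails the below-len test automatically and no separate sign check is
// needed. (IsSliceInBounds, a little further down, is the same trick with
// SETBE, since idx == len is legal when slicing.) An illustrative model (not
// generated code):
func exampleBoundsCheckModel(idx, length int) bool {
	return uint(idx) < uint(length) // one unsigned compare covers both bounds
}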
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETBE (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETBE (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
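// Note the operand swap in Leq32F above: x <= y is lowered as
// SETGEF (UCOMISS y x), i.e. as y >= x. UCOMIS only gives NaN-correct answers
// for the "greater"-style conditions (an unordered result sets the flags so
// those tests come out false), so less-than-style comparisons are implemented
// by reversing the operands; the Less32F/Less64F rules below do the same.
// Illustrative statement of the behavior this preserves (not generated code):
func exampleNaNCompare(x, y float32) bool {
	// Must be false whenever x or y is NaN; the swapped SETGEF form
	// guarantees that.
	return x <= y
}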
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
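// The Go spec requires x << y to be 0 once y reaches the operand width, but
// the x86 32-bit shifts mask their count mod 32, so Lsh16x16 above ANDs the
// shifted value with SBBLcarrymask (CMPWconst y [32]): the mask is all ones
// while y < 32 and all zeros otherwise, yielding the spec'd zero without a
// branch. (The cutoff for 16-bit shifts is still 32, not 16: counts 16..31
// already leave the low 16 bits zero on their own, and only counts >= 32
// would wrap around in hardware.) Illustrative model of the mask trick (not
// generated code):
func exampleMaskedShiftModel(x, y uint32) uint32 {
	shifted := x << (y & 31) // what SHLL actually computes
	var mask uint32
	if y < 32 {
		mask = ^uint32(0) // SBBLcarrymask: all ones when the compare borrows
	}
	return shifted & mask // forced to zero for y >= 32, as the spec requires
}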
v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40096 v0.AddArg(x) 40097 v0.AddArg(y) 40098 v.AddArg(v0) 40099 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40100 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 40101 v2.AuxInt = 32 40102 v2.AddArg(y) 40103 v1.AddArg(v2) 40104 v.AddArg(v1) 40105 return true 40106 } 40107 } 40108 func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool { 40109 b := v.Block 40110 _ = b 40111 // match: (Lsh16x8 <t> x y) 40112 // cond: 40113 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 40114 for { 40115 t := v.Type 40116 _ = v.Args[1] 40117 x := v.Args[0] 40118 y := v.Args[1] 40119 v.reset(OpAMD64ANDL) 40120 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40121 v0.AddArg(x) 40122 v0.AddArg(y) 40123 v.AddArg(v0) 40124 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40125 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 40126 v2.AuxInt = 32 40127 v2.AddArg(y) 40128 v1.AddArg(v2) 40129 v.AddArg(v1) 40130 return true 40131 } 40132 } 40133 func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool { 40134 b := v.Block 40135 _ = b 40136 // match: (Lsh32x16 <t> x y) 40137 // cond: 40138 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 40139 for { 40140 t := v.Type 40141 _ = v.Args[1] 40142 x := v.Args[0] 40143 y := v.Args[1] 40144 v.reset(OpAMD64ANDL) 40145 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40146 v0.AddArg(x) 40147 v0.AddArg(y) 40148 v.AddArg(v0) 40149 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40150 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 40151 v2.AuxInt = 32 40152 v2.AddArg(y) 40153 v1.AddArg(v2) 40154 v.AddArg(v1) 40155 return true 40156 } 40157 } 40158 func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool { 40159 b := v.Block 40160 _ = b 40161 // match: (Lsh32x32 <t> x y) 40162 // cond: 40163 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 40164 for { 40165 t := v.Type 40166 _ = v.Args[1] 40167 x := v.Args[0] 40168 y := v.Args[1] 40169 v.reset(OpAMD64ANDL) 40170 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40171 v0.AddArg(x) 40172 v0.AddArg(y) 40173 v.AddArg(v0) 40174 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40175 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 40176 v2.AuxInt = 32 40177 v2.AddArg(y) 40178 v1.AddArg(v2) 40179 v.AddArg(v1) 40180 return true 40181 } 40182 } 40183 func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool { 40184 b := v.Block 40185 _ = b 40186 // match: (Lsh32x64 <t> x y) 40187 // cond: 40188 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 40189 for { 40190 t := v.Type 40191 _ = v.Args[1] 40192 x := v.Args[0] 40193 y := v.Args[1] 40194 v.reset(OpAMD64ANDL) 40195 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40196 v0.AddArg(x) 40197 v0.AddArg(y) 40198 v.AddArg(v0) 40199 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40200 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 40201 v2.AuxInt = 32 40202 v2.AddArg(y) 40203 v1.AddArg(v2) 40204 v.AddArg(v1) 40205 return true 40206 } 40207 } 40208 func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool { 40209 b := v.Block 40210 _ = b 40211 // match: (Lsh32x8 <t> x y) 40212 // cond: 40213 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 40214 for { 40215 t := v.Type 40216 _ = v.Args[1] 40217 x := v.Args[0] 40218 y := v.Args[1] 40219 v.reset(OpAMD64ANDL) 40220 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40221 v0.AddArg(x) 40222 v0.AddArg(y) 40223 v.AddArg(v0) 40224 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40225 v2 := 
b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 40226 v2.AuxInt = 32 40227 v2.AddArg(y) 40228 v1.AddArg(v2) 40229 v.AddArg(v1) 40230 return true 40231 } 40232 } 40233 func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool { 40234 b := v.Block 40235 _ = b 40236 // match: (Lsh64x16 <t> x y) 40237 // cond: 40238 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 40239 for { 40240 t := v.Type 40241 _ = v.Args[1] 40242 x := v.Args[0] 40243 y := v.Args[1] 40244 v.reset(OpAMD64ANDQ) 40245 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 40246 v0.AddArg(x) 40247 v0.AddArg(y) 40248 v.AddArg(v0) 40249 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 40250 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 40251 v2.AuxInt = 64 40252 v2.AddArg(y) 40253 v1.AddArg(v2) 40254 v.AddArg(v1) 40255 return true 40256 } 40257 } 40258 func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool { 40259 b := v.Block 40260 _ = b 40261 // match: (Lsh64x32 <t> x y) 40262 // cond: 40263 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 40264 for { 40265 t := v.Type 40266 _ = v.Args[1] 40267 x := v.Args[0] 40268 y := v.Args[1] 40269 v.reset(OpAMD64ANDQ) 40270 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 40271 v0.AddArg(x) 40272 v0.AddArg(y) 40273 v.AddArg(v0) 40274 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 40275 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 40276 v2.AuxInt = 64 40277 v2.AddArg(y) 40278 v1.AddArg(v2) 40279 v.AddArg(v1) 40280 return true 40281 } 40282 } 40283 func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool { 40284 b := v.Block 40285 _ = b 40286 // match: (Lsh64x64 <t> x y) 40287 // cond: 40288 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 40289 for { 40290 t := v.Type 40291 _ = v.Args[1] 40292 x := v.Args[0] 40293 y := v.Args[1] 40294 v.reset(OpAMD64ANDQ) 40295 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 40296 v0.AddArg(x) 40297 v0.AddArg(y) 40298 v.AddArg(v0) 40299 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 40300 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 40301 v2.AuxInt = 64 40302 v2.AddArg(y) 40303 v1.AddArg(v2) 40304 v.AddArg(v1) 40305 return true 40306 } 40307 } 40308 func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool { 40309 b := v.Block 40310 _ = b 40311 // match: (Lsh64x8 <t> x y) 40312 // cond: 40313 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 40314 for { 40315 t := v.Type 40316 _ = v.Args[1] 40317 x := v.Args[0] 40318 y := v.Args[1] 40319 v.reset(OpAMD64ANDQ) 40320 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 40321 v0.AddArg(x) 40322 v0.AddArg(y) 40323 v.AddArg(v0) 40324 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 40325 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 40326 v2.AuxInt = 64 40327 v2.AddArg(y) 40328 v1.AddArg(v2) 40329 v.AddArg(v1) 40330 return true 40331 } 40332 } 40333 func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool { 40334 b := v.Block 40335 _ = b 40336 // match: (Lsh8x16 <t> x y) 40337 // cond: 40338 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 40339 for { 40340 t := v.Type 40341 _ = v.Args[1] 40342 x := v.Args[0] 40343 y := v.Args[1] 40344 v.reset(OpAMD64ANDL) 40345 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40346 v0.AddArg(x) 40347 v0.AddArg(y) 40348 v.AddArg(v0) 40349 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40350 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 40351 v2.AuxInt = 32 40352 v2.AddArg(y) 40353 v1.AddArg(v2) 40354 v.AddArg(v1) 40355 return true 40356 } 40357 } 40358 func 
rewriteValueAMD64_OpLsh8x32_0(v *Value) bool { 40359 b := v.Block 40360 _ = b 40361 // match: (Lsh8x32 <t> x y) 40362 // cond: 40363 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 40364 for { 40365 t := v.Type 40366 _ = v.Args[1] 40367 x := v.Args[0] 40368 y := v.Args[1] 40369 v.reset(OpAMD64ANDL) 40370 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40371 v0.AddArg(x) 40372 v0.AddArg(y) 40373 v.AddArg(v0) 40374 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40375 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 40376 v2.AuxInt = 32 40377 v2.AddArg(y) 40378 v1.AddArg(v2) 40379 v.AddArg(v1) 40380 return true 40381 } 40382 } 40383 func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool { 40384 b := v.Block 40385 _ = b 40386 // match: (Lsh8x64 <t> x y) 40387 // cond: 40388 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 40389 for { 40390 t := v.Type 40391 _ = v.Args[1] 40392 x := v.Args[0] 40393 y := v.Args[1] 40394 v.reset(OpAMD64ANDL) 40395 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40396 v0.AddArg(x) 40397 v0.AddArg(y) 40398 v.AddArg(v0) 40399 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40400 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 40401 v2.AuxInt = 32 40402 v2.AddArg(y) 40403 v1.AddArg(v2) 40404 v.AddArg(v1) 40405 return true 40406 } 40407 } 40408 func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool { 40409 b := v.Block 40410 _ = b 40411 // match: (Lsh8x8 <t> x y) 40412 // cond: 40413 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 40414 for { 40415 t := v.Type 40416 _ = v.Args[1] 40417 x := v.Args[0] 40418 y := v.Args[1] 40419 v.reset(OpAMD64ANDL) 40420 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 40421 v0.AddArg(x) 40422 v0.AddArg(y) 40423 v.AddArg(v0) 40424 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 40425 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 40426 v2.AuxInt = 32 40427 v2.AddArg(y) 40428 v1.AddArg(v2) 40429 v.AddArg(v1) 40430 return true 40431 } 40432 } 40433 func rewriteValueAMD64_OpMod16_0(v *Value) bool { 40434 b := v.Block 40435 _ = b 40436 typ := &b.Func.Config.Types 40437 _ = typ 40438 // match: (Mod16 x y) 40439 // cond: 40440 // result: (Select1 (DIVW x y)) 40441 for { 40442 _ = v.Args[1] 40443 x := v.Args[0] 40444 y := v.Args[1] 40445 v.reset(OpSelect1) 40446 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 40447 v0.AddArg(x) 40448 v0.AddArg(y) 40449 v.AddArg(v0) 40450 return true 40451 } 40452 } 40453 func rewriteValueAMD64_OpMod16u_0(v *Value) bool { 40454 b := v.Block 40455 _ = b 40456 typ := &b.Func.Config.Types 40457 _ = typ 40458 // match: (Mod16u x y) 40459 // cond: 40460 // result: (Select1 (DIVWU x y)) 40461 for { 40462 _ = v.Args[1] 40463 x := v.Args[0] 40464 y := v.Args[1] 40465 v.reset(OpSelect1) 40466 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 40467 v0.AddArg(x) 40468 v0.AddArg(y) 40469 v.AddArg(v0) 40470 return true 40471 } 40472 } 40473 func rewriteValueAMD64_OpMod32_0(v *Value) bool { 40474 b := v.Block 40475 _ = b 40476 typ := &b.Func.Config.Types 40477 _ = typ 40478 // match: (Mod32 x y) 40479 // cond: 40480 // result: (Select1 (DIVL x y)) 40481 for { 40482 _ = v.Args[1] 40483 x := v.Args[0] 40484 y := v.Args[1] 40485 v.reset(OpSelect1) 40486 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 40487 v0.AddArg(x) 40488 v0.AddArg(y) 40489 v.AddArg(v0) 40490 return true 40491 } 40492 } 40493 func rewriteValueAMD64_OpMod32u_0(v *Value) bool { 40494 b := v.Block 
40495 _ = b 40496 typ := &b.Func.Config.Types 40497 _ = typ 40498 // match: (Mod32u x y) 40499 // cond: 40500 // result: (Select1 (DIVLU x y)) 40501 for { 40502 _ = v.Args[1] 40503 x := v.Args[0] 40504 y := v.Args[1] 40505 v.reset(OpSelect1) 40506 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 40507 v0.AddArg(x) 40508 v0.AddArg(y) 40509 v.AddArg(v0) 40510 return true 40511 } 40512 } 40513 func rewriteValueAMD64_OpMod64_0(v *Value) bool { 40514 b := v.Block 40515 _ = b 40516 typ := &b.Func.Config.Types 40517 _ = typ 40518 // match: (Mod64 x y) 40519 // cond: 40520 // result: (Select1 (DIVQ x y)) 40521 for { 40522 _ = v.Args[1] 40523 x := v.Args[0] 40524 y := v.Args[1] 40525 v.reset(OpSelect1) 40526 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 40527 v0.AddArg(x) 40528 v0.AddArg(y) 40529 v.AddArg(v0) 40530 return true 40531 } 40532 } 40533 func rewriteValueAMD64_OpMod64u_0(v *Value) bool { 40534 b := v.Block 40535 _ = b 40536 typ := &b.Func.Config.Types 40537 _ = typ 40538 // match: (Mod64u x y) 40539 // cond: 40540 // result: (Select1 (DIVQU x y)) 40541 for { 40542 _ = v.Args[1] 40543 x := v.Args[0] 40544 y := v.Args[1] 40545 v.reset(OpSelect1) 40546 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 40547 v0.AddArg(x) 40548 v0.AddArg(y) 40549 v.AddArg(v0) 40550 return true 40551 } 40552 } 40553 func rewriteValueAMD64_OpMod8_0(v *Value) bool { 40554 b := v.Block 40555 _ = b 40556 typ := &b.Func.Config.Types 40557 _ = typ 40558 // match: (Mod8 x y) 40559 // cond: 40560 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 40561 for { 40562 _ = v.Args[1] 40563 x := v.Args[0] 40564 y := v.Args[1] 40565 v.reset(OpSelect1) 40566 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 40567 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 40568 v1.AddArg(x) 40569 v0.AddArg(v1) 40570 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 40571 v2.AddArg(y) 40572 v0.AddArg(v2) 40573 v.AddArg(v0) 40574 return true 40575 } 40576 } 40577 func rewriteValueAMD64_OpMod8u_0(v *Value) bool { 40578 b := v.Block 40579 _ = b 40580 typ := &b.Func.Config.Types 40581 _ = typ 40582 // match: (Mod8u x y) 40583 // cond: 40584 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 40585 for { 40586 _ = v.Args[1] 40587 x := v.Args[0] 40588 y := v.Args[1] 40589 v.reset(OpSelect1) 40590 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 40591 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 40592 v1.AddArg(x) 40593 v0.AddArg(v1) 40594 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 40595 v2.AddArg(y) 40596 v0.AddArg(v2) 40597 v.AddArg(v0) 40598 return true 40599 } 40600 } 40601 func rewriteValueAMD64_OpMove_0(v *Value) bool { 40602 b := v.Block 40603 _ = b 40604 typ := &b.Func.Config.Types 40605 _ = typ 40606 // match: (Move [0] _ _ mem) 40607 // cond: 40608 // result: mem 40609 for { 40610 if v.AuxInt != 0 { 40611 break 40612 } 40613 _ = v.Args[2] 40614 mem := v.Args[2] 40615 v.reset(OpCopy) 40616 v.Type = mem.Type 40617 v.AddArg(mem) 40618 return true 40619 } 40620 // match: (Move [1] dst src mem) 40621 // cond: 40622 // result: (MOVBstore dst (MOVBload src mem) mem) 40623 for { 40624 if v.AuxInt != 1 { 40625 break 40626 } 40627 _ = v.Args[2] 40628 dst := v.Args[0] 40629 src := v.Args[1] 40630 mem := v.Args[2] 40631 v.reset(OpAMD64MOVBstore) 40632 v.AddArg(dst) 40633 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 40634 v0.AddArg(src) 40635 
v0.AddArg(mem) 40636 v.AddArg(v0) 40637 v.AddArg(mem) 40638 return true 40639 } 40640 // match: (Move [2] dst src mem) 40641 // cond: 40642 // result: (MOVWstore dst (MOVWload src mem) mem) 40643 for { 40644 if v.AuxInt != 2 { 40645 break 40646 } 40647 _ = v.Args[2] 40648 dst := v.Args[0] 40649 src := v.Args[1] 40650 mem := v.Args[2] 40651 v.reset(OpAMD64MOVWstore) 40652 v.AddArg(dst) 40653 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 40654 v0.AddArg(src) 40655 v0.AddArg(mem) 40656 v.AddArg(v0) 40657 v.AddArg(mem) 40658 return true 40659 } 40660 // match: (Move [4] dst src mem) 40661 // cond: 40662 // result: (MOVLstore dst (MOVLload src mem) mem) 40663 for { 40664 if v.AuxInt != 4 { 40665 break 40666 } 40667 _ = v.Args[2] 40668 dst := v.Args[0] 40669 src := v.Args[1] 40670 mem := v.Args[2] 40671 v.reset(OpAMD64MOVLstore) 40672 v.AddArg(dst) 40673 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 40674 v0.AddArg(src) 40675 v0.AddArg(mem) 40676 v.AddArg(v0) 40677 v.AddArg(mem) 40678 return true 40679 } 40680 // match: (Move [8] dst src mem) 40681 // cond: 40682 // result: (MOVQstore dst (MOVQload src mem) mem) 40683 for { 40684 if v.AuxInt != 8 { 40685 break 40686 } 40687 _ = v.Args[2] 40688 dst := v.Args[0] 40689 src := v.Args[1] 40690 mem := v.Args[2] 40691 v.reset(OpAMD64MOVQstore) 40692 v.AddArg(dst) 40693 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 40694 v0.AddArg(src) 40695 v0.AddArg(mem) 40696 v.AddArg(v0) 40697 v.AddArg(mem) 40698 return true 40699 } 40700 // match: (Move [16] dst src mem) 40701 // cond: 40702 // result: (MOVOstore dst (MOVOload src mem) mem) 40703 for { 40704 if v.AuxInt != 16 { 40705 break 40706 } 40707 _ = v.Args[2] 40708 dst := v.Args[0] 40709 src := v.Args[1] 40710 mem := v.Args[2] 40711 v.reset(OpAMD64MOVOstore) 40712 v.AddArg(dst) 40713 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 40714 v0.AddArg(src) 40715 v0.AddArg(mem) 40716 v.AddArg(v0) 40717 v.AddArg(mem) 40718 return true 40719 } 40720 // match: (Move [3] dst src mem) 40721 // cond: 40722 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 40723 for { 40724 if v.AuxInt != 3 { 40725 break 40726 } 40727 _ = v.Args[2] 40728 dst := v.Args[0] 40729 src := v.Args[1] 40730 mem := v.Args[2] 40731 v.reset(OpAMD64MOVBstore) 40732 v.AuxInt = 2 40733 v.AddArg(dst) 40734 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 40735 v0.AuxInt = 2 40736 v0.AddArg(src) 40737 v0.AddArg(mem) 40738 v.AddArg(v0) 40739 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) 40740 v1.AddArg(dst) 40741 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 40742 v2.AddArg(src) 40743 v2.AddArg(mem) 40744 v1.AddArg(v2) 40745 v1.AddArg(mem) 40746 v.AddArg(v1) 40747 return true 40748 } 40749 // match: (Move [5] dst src mem) 40750 // cond: 40751 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 40752 for { 40753 if v.AuxInt != 5 { 40754 break 40755 } 40756 _ = v.Args[2] 40757 dst := v.Args[0] 40758 src := v.Args[1] 40759 mem := v.Args[2] 40760 v.reset(OpAMD64MOVBstore) 40761 v.AuxInt = 4 40762 v.AddArg(dst) 40763 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 40764 v0.AuxInt = 4 40765 v0.AddArg(src) 40766 v0.AddArg(mem) 40767 v.AddArg(v0) 40768 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 40769 v1.AddArg(dst) 40770 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 40771 v2.AddArg(src) 40772 v2.AddArg(mem) 40773 v1.AddArg(v2) 40774 v1.AddArg(mem) 40775 v.AddArg(v1) 40776 return true 40777 } 
40778 // match: (Move [6] dst src mem) 40779 // cond: 40780 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 40781 for { 40782 if v.AuxInt != 6 { 40783 break 40784 } 40785 _ = v.Args[2] 40786 dst := v.Args[0] 40787 src := v.Args[1] 40788 mem := v.Args[2] 40789 v.reset(OpAMD64MOVWstore) 40790 v.AuxInt = 4 40791 v.AddArg(dst) 40792 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 40793 v0.AuxInt = 4 40794 v0.AddArg(src) 40795 v0.AddArg(mem) 40796 v.AddArg(v0) 40797 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 40798 v1.AddArg(dst) 40799 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 40800 v2.AddArg(src) 40801 v2.AddArg(mem) 40802 v1.AddArg(v2) 40803 v1.AddArg(mem) 40804 v.AddArg(v1) 40805 return true 40806 } 40807 // match: (Move [7] dst src mem) 40808 // cond: 40809 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 40810 for { 40811 if v.AuxInt != 7 { 40812 break 40813 } 40814 _ = v.Args[2] 40815 dst := v.Args[0] 40816 src := v.Args[1] 40817 mem := v.Args[2] 40818 v.reset(OpAMD64MOVLstore) 40819 v.AuxInt = 3 40820 v.AddArg(dst) 40821 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 40822 v0.AuxInt = 3 40823 v0.AddArg(src) 40824 v0.AddArg(mem) 40825 v.AddArg(v0) 40826 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 40827 v1.AddArg(dst) 40828 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 40829 v2.AddArg(src) 40830 v2.AddArg(mem) 40831 v1.AddArg(v2) 40832 v1.AddArg(mem) 40833 v.AddArg(v1) 40834 return true 40835 } 40836 return false 40837 } 40838 func rewriteValueAMD64_OpMove_10(v *Value) bool { 40839 b := v.Block 40840 _ = b 40841 config := b.Func.Config 40842 _ = config 40843 typ := &b.Func.Config.Types 40844 _ = typ 40845 // match: (Move [s] dst src mem) 40846 // cond: s > 8 && s < 16 40847 // result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 40848 for { 40849 s := v.AuxInt 40850 _ = v.Args[2] 40851 dst := v.Args[0] 40852 src := v.Args[1] 40853 mem := v.Args[2] 40854 if !(s > 8 && s < 16) { 40855 break 40856 } 40857 v.reset(OpAMD64MOVQstore) 40858 v.AuxInt = s - 8 40859 v.AddArg(dst) 40860 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 40861 v0.AuxInt = s - 8 40862 v0.AddArg(src) 40863 v0.AddArg(mem) 40864 v.AddArg(v0) 40865 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 40866 v1.AddArg(dst) 40867 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 40868 v2.AddArg(src) 40869 v2.AddArg(mem) 40870 v1.AddArg(v2) 40871 v1.AddArg(mem) 40872 v.AddArg(v1) 40873 return true 40874 } 40875 // match: (Move [s] dst src mem) 40876 // cond: s > 16 && s%16 != 0 && s%16 <= 8 40877 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) 40878 for { 40879 s := v.AuxInt 40880 _ = v.Args[2] 40881 dst := v.Args[0] 40882 src := v.Args[1] 40883 mem := v.Args[2] 40884 if !(s > 16 && s%16 != 0 && s%16 <= 8) { 40885 break 40886 } 40887 v.reset(OpMove) 40888 v.AuxInt = s - s%16 40889 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 40890 v0.AuxInt = s % 16 40891 v0.AddArg(dst) 40892 v.AddArg(v0) 40893 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 40894 v1.AuxInt = s % 16 40895 v1.AddArg(src) 40896 v.AddArg(v1) 40897 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 40898 v2.AddArg(dst) 40899 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 40900 v3.AddArg(src) 40901 v3.AddArg(mem) 40902 v2.AddArg(v3) 40903 v2.AddArg(mem) 40904 v.AddArg(v2) 
40905 return true 40906 } 40907 // match: (Move [s] dst src mem) 40908 // cond: s > 16 && s%16 != 0 && s%16 > 8 40909 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) 40910 for { 40911 s := v.AuxInt 40912 _ = v.Args[2] 40913 dst := v.Args[0] 40914 src := v.Args[1] 40915 mem := v.Args[2] 40916 if !(s > 16 && s%16 != 0 && s%16 > 8) { 40917 break 40918 } 40919 v.reset(OpMove) 40920 v.AuxInt = s - s%16 40921 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 40922 v0.AuxInt = s % 16 40923 v0.AddArg(dst) 40924 v.AddArg(v0) 40925 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 40926 v1.AuxInt = s % 16 40927 v1.AddArg(src) 40928 v.AddArg(v1) 40929 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 40930 v2.AddArg(dst) 40931 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 40932 v3.AddArg(src) 40933 v3.AddArg(mem) 40934 v2.AddArg(v3) 40935 v2.AddArg(mem) 40936 v.AddArg(v2) 40937 return true 40938 } 40939 // match: (Move [s] dst src mem) 40940 // cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice 40941 // result: (DUFFCOPY [14*(64-s/16)] dst src mem) 40942 for { 40943 s := v.AuxInt 40944 _ = v.Args[2] 40945 dst := v.Args[0] 40946 src := v.Args[1] 40947 mem := v.Args[2] 40948 if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) { 40949 break 40950 } 40951 v.reset(OpAMD64DUFFCOPY) 40952 v.AuxInt = 14 * (64 - s/16) 40953 v.AddArg(dst) 40954 v.AddArg(src) 40955 v.AddArg(mem) 40956 return true 40957 } 40958 // match: (Move [s] dst src mem) 40959 // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 40960 // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) 40961 for { 40962 s := v.AuxInt 40963 _ = v.Args[2] 40964 dst := v.Args[0] 40965 src := v.Args[1] 40966 mem := v.Args[2] 40967 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) { 40968 break 40969 } 40970 v.reset(OpAMD64REPMOVSQ) 40971 v.AddArg(dst) 40972 v.AddArg(src) 40973 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 40974 v0.AuxInt = s / 8 40975 v.AddArg(v0) 40976 v.AddArg(mem) 40977 return true 40978 } 40979 return false 40980 } 40981 func rewriteValueAMD64_OpMul16_0(v *Value) bool { 40982 // match: (Mul16 x y) 40983 // cond: 40984 // result: (MULL x y) 40985 for { 40986 _ = v.Args[1] 40987 x := v.Args[0] 40988 y := v.Args[1] 40989 v.reset(OpAMD64MULL) 40990 v.AddArg(x) 40991 v.AddArg(y) 40992 return true 40993 } 40994 } 40995 func rewriteValueAMD64_OpMul32_0(v *Value) bool { 40996 // match: (Mul32 x y) 40997 // cond: 40998 // result: (MULL x y) 40999 for { 41000 _ = v.Args[1] 41001 x := v.Args[0] 41002 y := v.Args[1] 41003 v.reset(OpAMD64MULL) 41004 v.AddArg(x) 41005 v.AddArg(y) 41006 return true 41007 } 41008 } 41009 func rewriteValueAMD64_OpMul32F_0(v *Value) bool { 41010 // match: (Mul32F x y) 41011 // cond: 41012 // result: (MULSS x y) 41013 for { 41014 _ = v.Args[1] 41015 x := v.Args[0] 41016 y := v.Args[1] 41017 v.reset(OpAMD64MULSS) 41018 v.AddArg(x) 41019 v.AddArg(y) 41020 return true 41021 } 41022 } 41023 func rewriteValueAMD64_OpMul64_0(v *Value) bool { 41024 // match: (Mul64 x y) 41025 // cond: 41026 // result: (MULQ x y) 41027 for { 41028 _ = v.Args[1] 41029 x := v.Args[0] 41030 y := v.Args[1] 41031 v.reset(OpAMD64MULQ) 41032 v.AddArg(x) 41033 v.AddArg(y) 41034 return true 41035 } 41036 } 41037 func rewriteValueAMD64_OpMul64F_0(v *Value) bool { 41038 // match: (Mul64F x y) 41039 // cond: 41040 // result: (MULSD x y) 41041 for { 41042 _ = v.Args[1] 41043 x := v.Args[0] 41044 y := v.Args[1] 41045 
v.reset(OpAMD64MULSD) 41046 v.AddArg(x) 41047 v.AddArg(y) 41048 return true 41049 } 41050 } 41051 func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool { 41052 // match: (Mul64uhilo x y) 41053 // cond: 41054 // result: (MULQU2 x y) 41055 for { 41056 _ = v.Args[1] 41057 x := v.Args[0] 41058 y := v.Args[1] 41059 v.reset(OpAMD64MULQU2) 41060 v.AddArg(x) 41061 v.AddArg(y) 41062 return true 41063 } 41064 } 41065 func rewriteValueAMD64_OpMul8_0(v *Value) bool { 41066 // match: (Mul8 x y) 41067 // cond: 41068 // result: (MULL x y) 41069 for { 41070 _ = v.Args[1] 41071 x := v.Args[0] 41072 y := v.Args[1] 41073 v.reset(OpAMD64MULL) 41074 v.AddArg(x) 41075 v.AddArg(y) 41076 return true 41077 } 41078 } 41079 func rewriteValueAMD64_OpNeg16_0(v *Value) bool { 41080 // match: (Neg16 x) 41081 // cond: 41082 // result: (NEGL x) 41083 for { 41084 x := v.Args[0] 41085 v.reset(OpAMD64NEGL) 41086 v.AddArg(x) 41087 return true 41088 } 41089 } 41090 func rewriteValueAMD64_OpNeg32_0(v *Value) bool { 41091 // match: (Neg32 x) 41092 // cond: 41093 // result: (NEGL x) 41094 for { 41095 x := v.Args[0] 41096 v.reset(OpAMD64NEGL) 41097 v.AddArg(x) 41098 return true 41099 } 41100 } 41101 func rewriteValueAMD64_OpNeg32F_0(v *Value) bool { 41102 b := v.Block 41103 _ = b 41104 typ := &b.Func.Config.Types 41105 _ = typ 41106 // match: (Neg32F x) 41107 // cond: 41108 // result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))])) 41109 for { 41110 x := v.Args[0] 41111 v.reset(OpAMD64PXOR) 41112 v.AddArg(x) 41113 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) 41114 v0.AuxInt = f2i(math.Copysign(0, -1)) 41115 v.AddArg(v0) 41116 return true 41117 } 41118 } 41119 func rewriteValueAMD64_OpNeg64_0(v *Value) bool { 41120 // match: (Neg64 x) 41121 // cond: 41122 // result: (NEGQ x) 41123 for { 41124 x := v.Args[0] 41125 v.reset(OpAMD64NEGQ) 41126 v.AddArg(x) 41127 return true 41128 } 41129 } 41130 func rewriteValueAMD64_OpNeg64F_0(v *Value) bool { 41131 b := v.Block 41132 _ = b 41133 typ := &b.Func.Config.Types 41134 _ = typ 41135 // match: (Neg64F x) 41136 // cond: 41137 // result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))])) 41138 for { 41139 x := v.Args[0] 41140 v.reset(OpAMD64PXOR) 41141 v.AddArg(x) 41142 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) 41143 v0.AuxInt = f2i(math.Copysign(0, -1)) 41144 v.AddArg(v0) 41145 return true 41146 } 41147 } 41148 func rewriteValueAMD64_OpNeg8_0(v *Value) bool { 41149 // match: (Neg8 x) 41150 // cond: 41151 // result: (NEGL x) 41152 for { 41153 x := v.Args[0] 41154 v.reset(OpAMD64NEGL) 41155 v.AddArg(x) 41156 return true 41157 } 41158 } 41159 func rewriteValueAMD64_OpNeq16_0(v *Value) bool { 41160 b := v.Block 41161 _ = b 41162 // match: (Neq16 x y) 41163 // cond: 41164 // result: (SETNE (CMPW x y)) 41165 for { 41166 _ = v.Args[1] 41167 x := v.Args[0] 41168 y := v.Args[1] 41169 v.reset(OpAMD64SETNE) 41170 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 41171 v0.AddArg(x) 41172 v0.AddArg(y) 41173 v.AddArg(v0) 41174 return true 41175 } 41176 } 41177 func rewriteValueAMD64_OpNeq32_0(v *Value) bool { 41178 b := v.Block 41179 _ = b 41180 // match: (Neq32 x y) 41181 // cond: 41182 // result: (SETNE (CMPL x y)) 41183 for { 41184 _ = v.Args[1] 41185 x := v.Args[0] 41186 y := v.Args[1] 41187 v.reset(OpAMD64SETNE) 41188 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 41189 v0.AddArg(x) 41190 v0.AddArg(y) 41191 v.AddArg(v0) 41192 return true 41193 } 41194 } 41195 func rewriteValueAMD64_OpNeq32F_0(v *Value) bool { 41196 b := v.Block 41197 
_ = b 41198 // match: (Neq32F x y) 41199 // cond: 41200 // result: (SETNEF (UCOMISS x y)) 41201 for { 41202 _ = v.Args[1] 41203 x := v.Args[0] 41204 y := v.Args[1] 41205 v.reset(OpAMD64SETNEF) 41206 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 41207 v0.AddArg(x) 41208 v0.AddArg(y) 41209 v.AddArg(v0) 41210 return true 41211 } 41212 } 41213 func rewriteValueAMD64_OpNeq64_0(v *Value) bool { 41214 b := v.Block 41215 _ = b 41216 // match: (Neq64 x y) 41217 // cond: 41218 // result: (SETNE (CMPQ x y)) 41219 for { 41220 _ = v.Args[1] 41221 x := v.Args[0] 41222 y := v.Args[1] 41223 v.reset(OpAMD64SETNE) 41224 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41225 v0.AddArg(x) 41226 v0.AddArg(y) 41227 v.AddArg(v0) 41228 return true 41229 } 41230 } 41231 func rewriteValueAMD64_OpNeq64F_0(v *Value) bool { 41232 b := v.Block 41233 _ = b 41234 // match: (Neq64F x y) 41235 // cond: 41236 // result: (SETNEF (UCOMISD x y)) 41237 for { 41238 _ = v.Args[1] 41239 x := v.Args[0] 41240 y := v.Args[1] 41241 v.reset(OpAMD64SETNEF) 41242 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 41243 v0.AddArg(x) 41244 v0.AddArg(y) 41245 v.AddArg(v0) 41246 return true 41247 } 41248 } 41249 func rewriteValueAMD64_OpNeq8_0(v *Value) bool { 41250 b := v.Block 41251 _ = b 41252 // match: (Neq8 x y) 41253 // cond: 41254 // result: (SETNE (CMPB x y)) 41255 for { 41256 _ = v.Args[1] 41257 x := v.Args[0] 41258 y := v.Args[1] 41259 v.reset(OpAMD64SETNE) 41260 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 41261 v0.AddArg(x) 41262 v0.AddArg(y) 41263 v.AddArg(v0) 41264 return true 41265 } 41266 } 41267 func rewriteValueAMD64_OpNeqB_0(v *Value) bool { 41268 b := v.Block 41269 _ = b 41270 // match: (NeqB x y) 41271 // cond: 41272 // result: (SETNE (CMPB x y)) 41273 for { 41274 _ = v.Args[1] 41275 x := v.Args[0] 41276 y := v.Args[1] 41277 v.reset(OpAMD64SETNE) 41278 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 41279 v0.AddArg(x) 41280 v0.AddArg(y) 41281 v.AddArg(v0) 41282 return true 41283 } 41284 } 41285 func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool { 41286 b := v.Block 41287 _ = b 41288 config := b.Func.Config 41289 _ = config 41290 // match: (NeqPtr x y) 41291 // cond: config.PtrSize == 8 41292 // result: (SETNE (CMPQ x y)) 41293 for { 41294 _ = v.Args[1] 41295 x := v.Args[0] 41296 y := v.Args[1] 41297 if !(config.PtrSize == 8) { 41298 break 41299 } 41300 v.reset(OpAMD64SETNE) 41301 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 41302 v0.AddArg(x) 41303 v0.AddArg(y) 41304 v.AddArg(v0) 41305 return true 41306 } 41307 // match: (NeqPtr x y) 41308 // cond: config.PtrSize == 4 41309 // result: (SETNE (CMPL x y)) 41310 for { 41311 _ = v.Args[1] 41312 x := v.Args[0] 41313 y := v.Args[1] 41314 if !(config.PtrSize == 4) { 41315 break 41316 } 41317 v.reset(OpAMD64SETNE) 41318 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 41319 v0.AddArg(x) 41320 v0.AddArg(y) 41321 v.AddArg(v0) 41322 return true 41323 } 41324 return false 41325 } 41326 func rewriteValueAMD64_OpNilCheck_0(v *Value) bool { 41327 // match: (NilCheck ptr mem) 41328 // cond: 41329 // result: (LoweredNilCheck ptr mem) 41330 for { 41331 _ = v.Args[1] 41332 ptr := v.Args[0] 41333 mem := v.Args[1] 41334 v.reset(OpAMD64LoweredNilCheck) 41335 v.AddArg(ptr) 41336 v.AddArg(mem) 41337 return true 41338 } 41339 } 41340 func rewriteValueAMD64_OpNot_0(v *Value) bool { 41341 // match: (Not x) 41342 // cond: 41343 // result: (XORLconst [1] x) 41344 for { 41345 x := v.Args[0] 41346 v.reset(OpAMD64XORLconst) 41347 v.AuxInt = 1 
41348 v.AddArg(x) 41349 return true 41350 } 41351 } 41352 func rewriteValueAMD64_OpOffPtr_0(v *Value) bool { 41353 b := v.Block 41354 _ = b 41355 config := b.Func.Config 41356 _ = config 41357 typ := &b.Func.Config.Types 41358 _ = typ 41359 // match: (OffPtr [off] ptr) 41360 // cond: config.PtrSize == 8 && is32Bit(off) 41361 // result: (ADDQconst [off] ptr) 41362 for { 41363 off := v.AuxInt 41364 ptr := v.Args[0] 41365 if !(config.PtrSize == 8 && is32Bit(off)) { 41366 break 41367 } 41368 v.reset(OpAMD64ADDQconst) 41369 v.AuxInt = off 41370 v.AddArg(ptr) 41371 return true 41372 } 41373 // match: (OffPtr [off] ptr) 41374 // cond: config.PtrSize == 8 41375 // result: (ADDQ (MOVQconst [off]) ptr) 41376 for { 41377 off := v.AuxInt 41378 ptr := v.Args[0] 41379 if !(config.PtrSize == 8) { 41380 break 41381 } 41382 v.reset(OpAMD64ADDQ) 41383 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 41384 v0.AuxInt = off 41385 v.AddArg(v0) 41386 v.AddArg(ptr) 41387 return true 41388 } 41389 // match: (OffPtr [off] ptr) 41390 // cond: config.PtrSize == 4 41391 // result: (ADDLconst [off] ptr) 41392 for { 41393 off := v.AuxInt 41394 ptr := v.Args[0] 41395 if !(config.PtrSize == 4) { 41396 break 41397 } 41398 v.reset(OpAMD64ADDLconst) 41399 v.AuxInt = off 41400 v.AddArg(ptr) 41401 return true 41402 } 41403 return false 41404 } 41405 func rewriteValueAMD64_OpOr16_0(v *Value) bool { 41406 // match: (Or16 x y) 41407 // cond: 41408 // result: (ORL x y) 41409 for { 41410 _ = v.Args[1] 41411 x := v.Args[0] 41412 y := v.Args[1] 41413 v.reset(OpAMD64ORL) 41414 v.AddArg(x) 41415 v.AddArg(y) 41416 return true 41417 } 41418 } 41419 func rewriteValueAMD64_OpOr32_0(v *Value) bool { 41420 // match: (Or32 x y) 41421 // cond: 41422 // result: (ORL x y) 41423 for { 41424 _ = v.Args[1] 41425 x := v.Args[0] 41426 y := v.Args[1] 41427 v.reset(OpAMD64ORL) 41428 v.AddArg(x) 41429 v.AddArg(y) 41430 return true 41431 } 41432 } 41433 func rewriteValueAMD64_OpOr64_0(v *Value) bool { 41434 // match: (Or64 x y) 41435 // cond: 41436 // result: (ORQ x y) 41437 for { 41438 _ = v.Args[1] 41439 x := v.Args[0] 41440 y := v.Args[1] 41441 v.reset(OpAMD64ORQ) 41442 v.AddArg(x) 41443 v.AddArg(y) 41444 return true 41445 } 41446 } 41447 func rewriteValueAMD64_OpOr8_0(v *Value) bool { 41448 // match: (Or8 x y) 41449 // cond: 41450 // result: (ORL x y) 41451 for { 41452 _ = v.Args[1] 41453 x := v.Args[0] 41454 y := v.Args[1] 41455 v.reset(OpAMD64ORL) 41456 v.AddArg(x) 41457 v.AddArg(y) 41458 return true 41459 } 41460 } 41461 func rewriteValueAMD64_OpOrB_0(v *Value) bool { 41462 // match: (OrB x y) 41463 // cond: 41464 // result: (ORL x y) 41465 for { 41466 _ = v.Args[1] 41467 x := v.Args[0] 41468 y := v.Args[1] 41469 v.reset(OpAMD64ORL) 41470 v.AddArg(x) 41471 v.AddArg(y) 41472 return true 41473 } 41474 } 41475 func rewriteValueAMD64_OpPopCount16_0(v *Value) bool { 41476 b := v.Block 41477 _ = b 41478 typ := &b.Func.Config.Types 41479 _ = typ 41480 // match: (PopCount16 x) 41481 // cond: 41482 // result: (POPCNTL (MOVWQZX <typ.UInt32> x)) 41483 for { 41484 x := v.Args[0] 41485 v.reset(OpAMD64POPCNTL) 41486 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) 41487 v0.AddArg(x) 41488 v.AddArg(v0) 41489 return true 41490 } 41491 } 41492 func rewriteValueAMD64_OpPopCount32_0(v *Value) bool { 41493 // match: (PopCount32 x) 41494 // cond: 41495 // result: (POPCNTL x) 41496 for { 41497 x := v.Args[0] 41498 v.reset(OpAMD64POPCNTL) 41499 v.AddArg(x) 41500 return true 41501 } 41502 } 41503 func rewriteValueAMD64_OpPopCount64_0(v *Value) bool { 41504 // 
match: (PopCount64 x) 41505 // cond: 41506 // result: (POPCNTQ x) 41507 for { 41508 x := v.Args[0] 41509 v.reset(OpAMD64POPCNTQ) 41510 v.AddArg(x) 41511 return true 41512 } 41513 } 41514 func rewriteValueAMD64_OpPopCount8_0(v *Value) bool { 41515 b := v.Block 41516 _ = b 41517 typ := &b.Func.Config.Types 41518 _ = typ 41519 // match: (PopCount8 x) 41520 // cond: 41521 // result: (POPCNTL (MOVBQZX <typ.UInt32> x)) 41522 for { 41523 x := v.Args[0] 41524 v.reset(OpAMD64POPCNTL) 41525 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) 41526 v0.AddArg(x) 41527 v.AddArg(v0) 41528 return true 41529 } 41530 } 41531 func rewriteValueAMD64_OpRound32F_0(v *Value) bool { 41532 // match: (Round32F x) 41533 // cond: 41534 // result: x 41535 for { 41536 x := v.Args[0] 41537 v.reset(OpCopy) 41538 v.Type = x.Type 41539 v.AddArg(x) 41540 return true 41541 } 41542 } 41543 func rewriteValueAMD64_OpRound64F_0(v *Value) bool { 41544 // match: (Round64F x) 41545 // cond: 41546 // result: x 41547 for { 41548 x := v.Args[0] 41549 v.reset(OpCopy) 41550 v.Type = x.Type 41551 v.AddArg(x) 41552 return true 41553 } 41554 } 41555 func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool { 41556 b := v.Block 41557 _ = b 41558 // match: (Rsh16Ux16 <t> x y) 41559 // cond: 41560 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 41561 for { 41562 t := v.Type 41563 _ = v.Args[1] 41564 x := v.Args[0] 41565 y := v.Args[1] 41566 v.reset(OpAMD64ANDL) 41567 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 41568 v0.AddArg(x) 41569 v0.AddArg(y) 41570 v.AddArg(v0) 41571 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41572 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 41573 v2.AuxInt = 16 41574 v2.AddArg(y) 41575 v1.AddArg(v2) 41576 v.AddArg(v1) 41577 return true 41578 } 41579 } 41580 func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool { 41581 b := v.Block 41582 _ = b 41583 // match: (Rsh16Ux32 <t> x y) 41584 // cond: 41585 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 41586 for { 41587 t := v.Type 41588 _ = v.Args[1] 41589 x := v.Args[0] 41590 y := v.Args[1] 41591 v.reset(OpAMD64ANDL) 41592 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 41593 v0.AddArg(x) 41594 v0.AddArg(y) 41595 v.AddArg(v0) 41596 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41597 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 41598 v2.AuxInt = 16 41599 v2.AddArg(y) 41600 v1.AddArg(v2) 41601 v.AddArg(v1) 41602 return true 41603 } 41604 } 41605 func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool { 41606 b := v.Block 41607 _ = b 41608 // match: (Rsh16Ux64 <t> x y) 41609 // cond: 41610 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 41611 for { 41612 t := v.Type 41613 _ = v.Args[1] 41614 x := v.Args[0] 41615 y := v.Args[1] 41616 v.reset(OpAMD64ANDL) 41617 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 41618 v0.AddArg(x) 41619 v0.AddArg(y) 41620 v.AddArg(v0) 41621 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41622 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 41623 v2.AuxInt = 16 41624 v2.AddArg(y) 41625 v1.AddArg(v2) 41626 v.AddArg(v1) 41627 return true 41628 } 41629 } 41630 func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool { 41631 b := v.Block 41632 _ = b 41633 // match: (Rsh16Ux8 <t> x y) 41634 // cond: 41635 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 41636 for { 41637 t := v.Type 41638 _ = v.Args[1] 41639 x := v.Args[0] 41640 y := v.Args[1] 41641 v.reset(OpAMD64ANDL) 41642 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 41643 v0.AddArg(x) 
41644 v0.AddArg(y) 41645 v.AddArg(v0) 41646 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41647 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 41648 v2.AuxInt = 16 41649 v2.AddArg(y) 41650 v1.AddArg(v2) 41651 v.AddArg(v1) 41652 return true 41653 } 41654 } 41655 func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool { 41656 b := v.Block 41657 _ = b 41658 // match: (Rsh16x16 <t> x y) 41659 // cond: 41660 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) 41661 for { 41662 t := v.Type 41663 _ = v.Args[1] 41664 x := v.Args[0] 41665 y := v.Args[1] 41666 v.reset(OpAMD64SARW) 41667 v.Type = t 41668 v.AddArg(x) 41669 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 41670 v0.AddArg(y) 41671 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 41672 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 41673 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 41674 v3.AuxInt = 16 41675 v3.AddArg(y) 41676 v2.AddArg(v3) 41677 v1.AddArg(v2) 41678 v0.AddArg(v1) 41679 v.AddArg(v0) 41680 return true 41681 } 41682 } 41683 func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool { 41684 b := v.Block 41685 _ = b 41686 // match: (Rsh16x32 <t> x y) 41687 // cond: 41688 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) 41689 for { 41690 t := v.Type 41691 _ = v.Args[1] 41692 x := v.Args[0] 41693 y := v.Args[1] 41694 v.reset(OpAMD64SARW) 41695 v.Type = t 41696 v.AddArg(x) 41697 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 41698 v0.AddArg(y) 41699 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 41700 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 41701 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 41702 v3.AuxInt = 16 41703 v3.AddArg(y) 41704 v2.AddArg(v3) 41705 v1.AddArg(v2) 41706 v0.AddArg(v1) 41707 v.AddArg(v0) 41708 return true 41709 } 41710 } 41711 func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool { 41712 b := v.Block 41713 _ = b 41714 // match: (Rsh16x64 <t> x y) 41715 // cond: 41716 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) 41717 for { 41718 t := v.Type 41719 _ = v.Args[1] 41720 x := v.Args[0] 41721 y := v.Args[1] 41722 v.reset(OpAMD64SARW) 41723 v.Type = t 41724 v.AddArg(x) 41725 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 41726 v0.AddArg(y) 41727 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 41728 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 41729 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 41730 v3.AuxInt = 16 41731 v3.AddArg(y) 41732 v2.AddArg(v3) 41733 v1.AddArg(v2) 41734 v0.AddArg(v1) 41735 v.AddArg(v0) 41736 return true 41737 } 41738 } 41739 func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool { 41740 b := v.Block 41741 _ = b 41742 // match: (Rsh16x8 <t> x y) 41743 // cond: 41744 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) 41745 for { 41746 t := v.Type 41747 _ = v.Args[1] 41748 x := v.Args[0] 41749 y := v.Args[1] 41750 v.reset(OpAMD64SARW) 41751 v.Type = t 41752 v.AddArg(x) 41753 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 41754 v0.AddArg(y) 41755 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 41756 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 41757 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 41758 v3.AuxInt = 16 41759 v3.AddArg(y) 41760 v2.AddArg(v3) 41761 v1.AddArg(v2) 41762 v0.AddArg(v1) 41763 v.AddArg(v0) 41764 return true 41765 } 41766 } 41767 func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool { 41768 b := 
v.Block 41769 _ = b 41770 // match: (Rsh32Ux16 <t> x y) 41771 // cond: 41772 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 41773 for { 41774 t := v.Type 41775 _ = v.Args[1] 41776 x := v.Args[0] 41777 y := v.Args[1] 41778 v.reset(OpAMD64ANDL) 41779 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 41780 v0.AddArg(x) 41781 v0.AddArg(y) 41782 v.AddArg(v0) 41783 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41784 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 41785 v2.AuxInt = 32 41786 v2.AddArg(y) 41787 v1.AddArg(v2) 41788 v.AddArg(v1) 41789 return true 41790 } 41791 } 41792 func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool { 41793 b := v.Block 41794 _ = b 41795 // match: (Rsh32Ux32 <t> x y) 41796 // cond: 41797 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 41798 for { 41799 t := v.Type 41800 _ = v.Args[1] 41801 x := v.Args[0] 41802 y := v.Args[1] 41803 v.reset(OpAMD64ANDL) 41804 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 41805 v0.AddArg(x) 41806 v0.AddArg(y) 41807 v.AddArg(v0) 41808 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41809 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 41810 v2.AuxInt = 32 41811 v2.AddArg(y) 41812 v1.AddArg(v2) 41813 v.AddArg(v1) 41814 return true 41815 } 41816 } 41817 func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool { 41818 b := v.Block 41819 _ = b 41820 // match: (Rsh32Ux64 <t> x y) 41821 // cond: 41822 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 41823 for { 41824 t := v.Type 41825 _ = v.Args[1] 41826 x := v.Args[0] 41827 y := v.Args[1] 41828 v.reset(OpAMD64ANDL) 41829 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 41830 v0.AddArg(x) 41831 v0.AddArg(y) 41832 v.AddArg(v0) 41833 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41834 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 41835 v2.AuxInt = 32 41836 v2.AddArg(y) 41837 v1.AddArg(v2) 41838 v.AddArg(v1) 41839 return true 41840 } 41841 } 41842 func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool { 41843 b := v.Block 41844 _ = b 41845 // match: (Rsh32Ux8 <t> x y) 41846 // cond: 41847 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 41848 for { 41849 t := v.Type 41850 _ = v.Args[1] 41851 x := v.Args[0] 41852 y := v.Args[1] 41853 v.reset(OpAMD64ANDL) 41854 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 41855 v0.AddArg(x) 41856 v0.AddArg(y) 41857 v.AddArg(v0) 41858 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 41859 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 41860 v2.AuxInt = 32 41861 v2.AddArg(y) 41862 v1.AddArg(v2) 41863 v.AddArg(v1) 41864 return true 41865 } 41866 } 41867 func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool { 41868 b := v.Block 41869 _ = b 41870 // match: (Rsh32x16 <t> x y) 41871 // cond: 41872 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 41873 for { 41874 t := v.Type 41875 _ = v.Args[1] 41876 x := v.Args[0] 41877 y := v.Args[1] 41878 v.reset(OpAMD64SARL) 41879 v.Type = t 41880 v.AddArg(x) 41881 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 41882 v0.AddArg(y) 41883 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 41884 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 41885 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 41886 v3.AuxInt = 32 41887 v3.AddArg(y) 41888 v2.AddArg(v3) 41889 v1.AddArg(v2) 41890 v0.AddArg(v1) 41891 v.AddArg(v0) 41892 return true 41893 } 41894 } 41895 func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool { 41896 b := v.Block 41897 _ = b 41898 // match: 
(Rsh32x32 <t> x y) 41899 // cond: 41900 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 41901 for { 41902 t := v.Type 41903 _ = v.Args[1] 41904 x := v.Args[0] 41905 y := v.Args[1] 41906 v.reset(OpAMD64SARL) 41907 v.Type = t 41908 v.AddArg(x) 41909 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 41910 v0.AddArg(y) 41911 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 41912 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 41913 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 41914 v3.AuxInt = 32 41915 v3.AddArg(y) 41916 v2.AddArg(v3) 41917 v1.AddArg(v2) 41918 v0.AddArg(v1) 41919 v.AddArg(v0) 41920 return true 41921 } 41922 } 41923 func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool { 41924 b := v.Block 41925 _ = b 41926 // match: (Rsh32x64 <t> x y) 41927 // cond: 41928 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 41929 for { 41930 t := v.Type 41931 _ = v.Args[1] 41932 x := v.Args[0] 41933 y := v.Args[1] 41934 v.reset(OpAMD64SARL) 41935 v.Type = t 41936 v.AddArg(x) 41937 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 41938 v0.AddArg(y) 41939 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 41940 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 41941 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 41942 v3.AuxInt = 32 41943 v3.AddArg(y) 41944 v2.AddArg(v3) 41945 v1.AddArg(v2) 41946 v0.AddArg(v1) 41947 v.AddArg(v0) 41948 return true 41949 } 41950 } 41951 func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool { 41952 b := v.Block 41953 _ = b 41954 // match: (Rsh32x8 <t> x y) 41955 // cond: 41956 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 41957 for { 41958 t := v.Type 41959 _ = v.Args[1] 41960 x := v.Args[0] 41961 y := v.Args[1] 41962 v.reset(OpAMD64SARL) 41963 v.Type = t 41964 v.AddArg(x) 41965 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 41966 v0.AddArg(y) 41967 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 41968 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 41969 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 41970 v3.AuxInt = 32 41971 v3.AddArg(y) 41972 v2.AddArg(v3) 41973 v1.AddArg(v2) 41974 v0.AddArg(v1) 41975 v.AddArg(v0) 41976 return true 41977 } 41978 } 41979 func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool { 41980 b := v.Block 41981 _ = b 41982 // match: (Rsh64Ux16 <t> x y) 41983 // cond: 41984 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 41985 for { 41986 t := v.Type 41987 _ = v.Args[1] 41988 x := v.Args[0] 41989 y := v.Args[1] 41990 v.reset(OpAMD64ANDQ) 41991 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 41992 v0.AddArg(x) 41993 v0.AddArg(y) 41994 v.AddArg(v0) 41995 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 41996 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 41997 v2.AuxInt = 64 41998 v2.AddArg(y) 41999 v1.AddArg(v2) 42000 v.AddArg(v1) 42001 return true 42002 } 42003 } 42004 func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool { 42005 b := v.Block 42006 _ = b 42007 // match: (Rsh64Ux32 <t> x y) 42008 // cond: 42009 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 42010 for { 42011 t := v.Type 42012 _ = v.Args[1] 42013 x := v.Args[0] 42014 y := v.Args[1] 42015 v.reset(OpAMD64ANDQ) 42016 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 42017 v0.AddArg(x) 42018 v0.AddArg(y) 42019 v.AddArg(v0) 42020 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 42021 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 42022 
v2.AuxInt = 64 42023 v2.AddArg(y) 42024 v1.AddArg(v2) 42025 v.AddArg(v1) 42026 return true 42027 } 42028 } 42029 func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool { 42030 b := v.Block 42031 _ = b 42032 // match: (Rsh64Ux64 <t> x y) 42033 // cond: 42034 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 42035 for { 42036 t := v.Type 42037 _ = v.Args[1] 42038 x := v.Args[0] 42039 y := v.Args[1] 42040 v.reset(OpAMD64ANDQ) 42041 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 42042 v0.AddArg(x) 42043 v0.AddArg(y) 42044 v.AddArg(v0) 42045 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 42046 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 42047 v2.AuxInt = 64 42048 v2.AddArg(y) 42049 v1.AddArg(v2) 42050 v.AddArg(v1) 42051 return true 42052 } 42053 } 42054 func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool { 42055 b := v.Block 42056 _ = b 42057 // match: (Rsh64Ux8 <t> x y) 42058 // cond: 42059 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 42060 for { 42061 t := v.Type 42062 _ = v.Args[1] 42063 x := v.Args[0] 42064 y := v.Args[1] 42065 v.reset(OpAMD64ANDQ) 42066 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 42067 v0.AddArg(x) 42068 v0.AddArg(y) 42069 v.AddArg(v0) 42070 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 42071 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 42072 v2.AuxInt = 64 42073 v2.AddArg(y) 42074 v1.AddArg(v2) 42075 v.AddArg(v1) 42076 return true 42077 } 42078 } 42079 func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool { 42080 b := v.Block 42081 _ = b 42082 // match: (Rsh64x16 <t> x y) 42083 // cond: 42084 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 42085 for { 42086 t := v.Type 42087 _ = v.Args[1] 42088 x := v.Args[0] 42089 y := v.Args[1] 42090 v.reset(OpAMD64SARQ) 42091 v.Type = t 42092 v.AddArg(x) 42093 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 42094 v0.AddArg(y) 42095 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 42096 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 42097 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 42098 v3.AuxInt = 64 42099 v3.AddArg(y) 42100 v2.AddArg(v3) 42101 v1.AddArg(v2) 42102 v0.AddArg(v1) 42103 v.AddArg(v0) 42104 return true 42105 } 42106 } 42107 func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool { 42108 b := v.Block 42109 _ = b 42110 // match: (Rsh64x32 <t> x y) 42111 // cond: 42112 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 42113 for { 42114 t := v.Type 42115 _ = v.Args[1] 42116 x := v.Args[0] 42117 y := v.Args[1] 42118 v.reset(OpAMD64SARQ) 42119 v.Type = t 42120 v.AddArg(x) 42121 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 42122 v0.AddArg(y) 42123 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 42124 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 42125 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 42126 v3.AuxInt = 64 42127 v3.AddArg(y) 42128 v2.AddArg(v3) 42129 v1.AddArg(v2) 42130 v0.AddArg(v1) 42131 v.AddArg(v0) 42132 return true 42133 } 42134 } 42135 func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool { 42136 b := v.Block 42137 _ = b 42138 // match: (Rsh64x64 <t> x y) 42139 // cond: 42140 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 42141 for { 42142 t := v.Type 42143 _ = v.Args[1] 42144 x := v.Args[0] 42145 y := v.Args[1] 42146 v.reset(OpAMD64SARQ) 42147 v.Type = t 42148 v.AddArg(x) 42149 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 42150 v0.AddArg(y) 42151 v1 := 
b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 42152 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 42153 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 42154 v3.AuxInt = 64 42155 v3.AddArg(y) 42156 v2.AddArg(v3) 42157 v1.AddArg(v2) 42158 v0.AddArg(v1) 42159 v.AddArg(v0) 42160 return true 42161 } 42162 } 42163 func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool { 42164 b := v.Block 42165 _ = b 42166 // match: (Rsh64x8 <t> x y) 42167 // cond: 42168 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 42169 for { 42170 t := v.Type 42171 _ = v.Args[1] 42172 x := v.Args[0] 42173 y := v.Args[1] 42174 v.reset(OpAMD64SARQ) 42175 v.Type = t 42176 v.AddArg(x) 42177 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 42178 v0.AddArg(y) 42179 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 42180 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 42181 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 42182 v3.AuxInt = 64 42183 v3.AddArg(y) 42184 v2.AddArg(v3) 42185 v1.AddArg(v2) 42186 v0.AddArg(v1) 42187 v.AddArg(v0) 42188 return true 42189 } 42190 } 42191 func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { 42192 b := v.Block 42193 _ = b 42194 // match: (Rsh8Ux16 <t> x y) 42195 // cond: 42196 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 42197 for { 42198 t := v.Type 42199 _ = v.Args[1] 42200 x := v.Args[0] 42201 y := v.Args[1] 42202 v.reset(OpAMD64ANDL) 42203 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 42204 v0.AddArg(x) 42205 v0.AddArg(y) 42206 v.AddArg(v0) 42207 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 42208 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 42209 v2.AuxInt = 8 42210 v2.AddArg(y) 42211 v1.AddArg(v2) 42212 v.AddArg(v1) 42213 return true 42214 } 42215 } 42216 func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { 42217 b := v.Block 42218 _ = b 42219 // match: (Rsh8Ux32 <t> x y) 42220 // cond: 42221 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 42222 for { 42223 t := v.Type 42224 _ = v.Args[1] 42225 x := v.Args[0] 42226 y := v.Args[1] 42227 v.reset(OpAMD64ANDL) 42228 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 42229 v0.AddArg(x) 42230 v0.AddArg(y) 42231 v.AddArg(v0) 42232 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 42233 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 42234 v2.AuxInt = 8 42235 v2.AddArg(y) 42236 v1.AddArg(v2) 42237 v.AddArg(v1) 42238 return true 42239 } 42240 } 42241 func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { 42242 b := v.Block 42243 _ = b 42244 // match: (Rsh8Ux64 <t> x y) 42245 // cond: 42246 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 42247 for { 42248 t := v.Type 42249 _ = v.Args[1] 42250 x := v.Args[0] 42251 y := v.Args[1] 42252 v.reset(OpAMD64ANDL) 42253 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 42254 v0.AddArg(x) 42255 v0.AddArg(y) 42256 v.AddArg(v0) 42257 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 42258 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 42259 v2.AuxInt = 8 42260 v2.AddArg(y) 42261 v1.AddArg(v2) 42262 v.AddArg(v1) 42263 return true 42264 } 42265 } 42266 func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { 42267 b := v.Block 42268 _ = b 42269 // match: (Rsh8Ux8 <t> x y) 42270 // cond: 42271 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 42272 for { 42273 t := v.Type 42274 _ = v.Args[1] 42275 x := v.Args[0] 42276 y := v.Args[1] 42277 v.reset(OpAMD64ANDL) 42278 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 42279 v0.AddArg(x) 42280 
v0.AddArg(y) 42281 v.AddArg(v0) 42282 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 42283 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 42284 v2.AuxInt = 8 42285 v2.AddArg(y) 42286 v1.AddArg(v2) 42287 v.AddArg(v1) 42288 return true 42289 } 42290 } 42291 func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool { 42292 b := v.Block 42293 _ = b 42294 // match: (Rsh8x16 <t> x y) 42295 // cond: 42296 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 42297 for { 42298 t := v.Type 42299 _ = v.Args[1] 42300 x := v.Args[0] 42301 y := v.Args[1] 42302 v.reset(OpAMD64SARB) 42303 v.Type = t 42304 v.AddArg(x) 42305 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 42306 v0.AddArg(y) 42307 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 42308 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 42309 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 42310 v3.AuxInt = 8 42311 v3.AddArg(y) 42312 v2.AddArg(v3) 42313 v1.AddArg(v2) 42314 v0.AddArg(v1) 42315 v.AddArg(v0) 42316 return true 42317 } 42318 } 42319 func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { 42320 b := v.Block 42321 _ = b 42322 // match: (Rsh8x32 <t> x y) 42323 // cond: 42324 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 42325 for { 42326 t := v.Type 42327 _ = v.Args[1] 42328 x := v.Args[0] 42329 y := v.Args[1] 42330 v.reset(OpAMD64SARB) 42331 v.Type = t 42332 v.AddArg(x) 42333 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 42334 v0.AddArg(y) 42335 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 42336 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 42337 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 42338 v3.AuxInt = 8 42339 v3.AddArg(y) 42340 v2.AddArg(v3) 42341 v1.AddArg(v2) 42342 v0.AddArg(v1) 42343 v.AddArg(v0) 42344 return true 42345 } 42346 } 42347 func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { 42348 b := v.Block 42349 _ = b 42350 // match: (Rsh8x64 <t> x y) 42351 // cond: 42352 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 42353 for { 42354 t := v.Type 42355 _ = v.Args[1] 42356 x := v.Args[0] 42357 y := v.Args[1] 42358 v.reset(OpAMD64SARB) 42359 v.Type = t 42360 v.AddArg(x) 42361 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 42362 v0.AddArg(y) 42363 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 42364 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 42365 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 42366 v3.AuxInt = 8 42367 v3.AddArg(y) 42368 v2.AddArg(v3) 42369 v1.AddArg(v2) 42370 v0.AddArg(v1) 42371 v.AddArg(v0) 42372 return true 42373 } 42374 } 42375 func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { 42376 b := v.Block 42377 _ = b 42378 // match: (Rsh8x8 <t> x y) 42379 // cond: 42380 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 42381 for { 42382 t := v.Type 42383 _ = v.Args[1] 42384 x := v.Args[0] 42385 y := v.Args[1] 42386 v.reset(OpAMD64SARB) 42387 v.Type = t 42388 v.AddArg(x) 42389 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 42390 v0.AddArg(y) 42391 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 42392 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 42393 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 42394 v3.AuxInt = 8 42395 v3.AddArg(y) 42396 v2.AddArg(v3) 42397 v1.AddArg(v2) 42398 v0.AddArg(v1) 42399 v.AddArg(v0) 42400 return true 42401 } 42402 } 42403 func rewriteValueAMD64_OpSelect0_0(v *Value) bool { 42404 b := v.Block 42405 _ = b 42406 
// match: (Select0 <t> (AddTupleFirst32 val tuple)) 42407 // cond: 42408 // result: (ADDL val (Select0 <t> tuple)) 42409 for { 42410 t := v.Type 42411 v_0 := v.Args[0] 42412 if v_0.Op != OpAMD64AddTupleFirst32 { 42413 break 42414 } 42415 _ = v_0.Args[1] 42416 val := v_0.Args[0] 42417 tuple := v_0.Args[1] 42418 v.reset(OpAMD64ADDL) 42419 v.AddArg(val) 42420 v0 := b.NewValue0(v.Pos, OpSelect0, t) 42421 v0.AddArg(tuple) 42422 v.AddArg(v0) 42423 return true 42424 } 42425 // match: (Select0 <t> (AddTupleFirst64 val tuple)) 42426 // cond: 42427 // result: (ADDQ val (Select0 <t> tuple)) 42428 for { 42429 t := v.Type 42430 v_0 := v.Args[0] 42431 if v_0.Op != OpAMD64AddTupleFirst64 { 42432 break 42433 } 42434 _ = v_0.Args[1] 42435 val := v_0.Args[0] 42436 tuple := v_0.Args[1] 42437 v.reset(OpAMD64ADDQ) 42438 v.AddArg(val) 42439 v0 := b.NewValue0(v.Pos, OpSelect0, t) 42440 v0.AddArg(tuple) 42441 v.AddArg(v0) 42442 return true 42443 } 42444 return false 42445 } 42446 func rewriteValueAMD64_OpSelect1_0(v *Value) bool { 42447 // match: (Select1 (AddTupleFirst32 _ tuple)) 42448 // cond: 42449 // result: (Select1 tuple) 42450 for { 42451 v_0 := v.Args[0] 42452 if v_0.Op != OpAMD64AddTupleFirst32 { 42453 break 42454 } 42455 _ = v_0.Args[1] 42456 tuple := v_0.Args[1] 42457 v.reset(OpSelect1) 42458 v.AddArg(tuple) 42459 return true 42460 } 42461 // match: (Select1 (AddTupleFirst64 _ tuple)) 42462 // cond: 42463 // result: (Select1 tuple) 42464 for { 42465 v_0 := v.Args[0] 42466 if v_0.Op != OpAMD64AddTupleFirst64 { 42467 break 42468 } 42469 _ = v_0.Args[1] 42470 tuple := v_0.Args[1] 42471 v.reset(OpSelect1) 42472 v.AddArg(tuple) 42473 return true 42474 } 42475 return false 42476 } 42477 func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool { 42478 // match: (SignExt16to32 x) 42479 // cond: 42480 // result: (MOVWQSX x) 42481 for { 42482 x := v.Args[0] 42483 v.reset(OpAMD64MOVWQSX) 42484 v.AddArg(x) 42485 return true 42486 } 42487 } 42488 func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool { 42489 // match: (SignExt16to64 x) 42490 // cond: 42491 // result: (MOVWQSX x) 42492 for { 42493 x := v.Args[0] 42494 v.reset(OpAMD64MOVWQSX) 42495 v.AddArg(x) 42496 return true 42497 } 42498 } 42499 func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool { 42500 // match: (SignExt32to64 x) 42501 // cond: 42502 // result: (MOVLQSX x) 42503 for { 42504 x := v.Args[0] 42505 v.reset(OpAMD64MOVLQSX) 42506 v.AddArg(x) 42507 return true 42508 } 42509 } 42510 func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool { 42511 // match: (SignExt8to16 x) 42512 // cond: 42513 // result: (MOVBQSX x) 42514 for { 42515 x := v.Args[0] 42516 v.reset(OpAMD64MOVBQSX) 42517 v.AddArg(x) 42518 return true 42519 } 42520 } 42521 func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool { 42522 // match: (SignExt8to32 x) 42523 // cond: 42524 // result: (MOVBQSX x) 42525 for { 42526 x := v.Args[0] 42527 v.reset(OpAMD64MOVBQSX) 42528 v.AddArg(x) 42529 return true 42530 } 42531 } 42532 func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool { 42533 // match: (SignExt8to64 x) 42534 // cond: 42535 // result: (MOVBQSX x) 42536 for { 42537 x := v.Args[0] 42538 v.reset(OpAMD64MOVBQSX) 42539 v.AddArg(x) 42540 return true 42541 } 42542 } 42543 func rewriteValueAMD64_OpSlicemask_0(v *Value) bool { 42544 b := v.Block 42545 _ = b 42546 // match: (Slicemask <t> x) 42547 // cond: 42548 // result: (SARQconst (NEGQ <t> x) [63]) 42549 for { 42550 t := v.Type 42551 x := v.Args[0] 42552 v.reset(OpAMD64SARQconst) 42553 v.AuxInt = 63 42554 v0 := b.NewValue0(v.Pos, 
OpAMD64NEGQ, t) 42555 v0.AddArg(x) 42556 v.AddArg(v0) 42557 return true 42558 } 42559 } 42560 func rewriteValueAMD64_OpSqrt_0(v *Value) bool { 42561 // match: (Sqrt x) 42562 // cond: 42563 // result: (SQRTSD x) 42564 for { 42565 x := v.Args[0] 42566 v.reset(OpAMD64SQRTSD) 42567 v.AddArg(x) 42568 return true 42569 } 42570 } 42571 func rewriteValueAMD64_OpStaticCall_0(v *Value) bool { 42572 // match: (StaticCall [argwid] {target} mem) 42573 // cond: 42574 // result: (CALLstatic [argwid] {target} mem) 42575 for { 42576 argwid := v.AuxInt 42577 target := v.Aux 42578 mem := v.Args[0] 42579 v.reset(OpAMD64CALLstatic) 42580 v.AuxInt = argwid 42581 v.Aux = target 42582 v.AddArg(mem) 42583 return true 42584 } 42585 } 42586 func rewriteValueAMD64_OpStore_0(v *Value) bool { 42587 // match: (Store {t} ptr val mem) 42588 // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) 42589 // result: (MOVSDstore ptr val mem) 42590 for { 42591 t := v.Aux 42592 _ = v.Args[2] 42593 ptr := v.Args[0] 42594 val := v.Args[1] 42595 mem := v.Args[2] 42596 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { 42597 break 42598 } 42599 v.reset(OpAMD64MOVSDstore) 42600 v.AddArg(ptr) 42601 v.AddArg(val) 42602 v.AddArg(mem) 42603 return true 42604 } 42605 // match: (Store {t} ptr val mem) 42606 // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) 42607 // result: (MOVSSstore ptr val mem) 42608 for { 42609 t := v.Aux 42610 _ = v.Args[2] 42611 ptr := v.Args[0] 42612 val := v.Args[1] 42613 mem := v.Args[2] 42614 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { 42615 break 42616 } 42617 v.reset(OpAMD64MOVSSstore) 42618 v.AddArg(ptr) 42619 v.AddArg(val) 42620 v.AddArg(mem) 42621 return true 42622 } 42623 // match: (Store {t} ptr val mem) 42624 // cond: t.(*types.Type).Size() == 8 42625 // result: (MOVQstore ptr val mem) 42626 for { 42627 t := v.Aux 42628 _ = v.Args[2] 42629 ptr := v.Args[0] 42630 val := v.Args[1] 42631 mem := v.Args[2] 42632 if !(t.(*types.Type).Size() == 8) { 42633 break 42634 } 42635 v.reset(OpAMD64MOVQstore) 42636 v.AddArg(ptr) 42637 v.AddArg(val) 42638 v.AddArg(mem) 42639 return true 42640 } 42641 // match: (Store {t} ptr val mem) 42642 // cond: t.(*types.Type).Size() == 4 42643 // result: (MOVLstore ptr val mem) 42644 for { 42645 t := v.Aux 42646 _ = v.Args[2] 42647 ptr := v.Args[0] 42648 val := v.Args[1] 42649 mem := v.Args[2] 42650 if !(t.(*types.Type).Size() == 4) { 42651 break 42652 } 42653 v.reset(OpAMD64MOVLstore) 42654 v.AddArg(ptr) 42655 v.AddArg(val) 42656 v.AddArg(mem) 42657 return true 42658 } 42659 // match: (Store {t} ptr val mem) 42660 // cond: t.(*types.Type).Size() == 2 42661 // result: (MOVWstore ptr val mem) 42662 for { 42663 t := v.Aux 42664 _ = v.Args[2] 42665 ptr := v.Args[0] 42666 val := v.Args[1] 42667 mem := v.Args[2] 42668 if !(t.(*types.Type).Size() == 2) { 42669 break 42670 } 42671 v.reset(OpAMD64MOVWstore) 42672 v.AddArg(ptr) 42673 v.AddArg(val) 42674 v.AddArg(mem) 42675 return true 42676 } 42677 // match: (Store {t} ptr val mem) 42678 // cond: t.(*types.Type).Size() == 1 42679 // result: (MOVBstore ptr val mem) 42680 for { 42681 t := v.Aux 42682 _ = v.Args[2] 42683 ptr := v.Args[0] 42684 val := v.Args[1] 42685 mem := v.Args[2] 42686 if !(t.(*types.Type).Size() == 1) { 42687 break 42688 } 42689 v.reset(OpAMD64MOVBstore) 42690 v.AddArg(ptr) 42691 v.AddArg(val) 42692 v.AddArg(mem) 42693 return true 42694 } 42695 return false 42696 } 42697 func rewriteValueAMD64_OpSub16_0(v *Value) bool { 42698 // match: (Sub16 x y) 42699 // cond: 42700 // 
result: (SUBL x y) 42701 for { 42702 _ = v.Args[1] 42703 x := v.Args[0] 42704 y := v.Args[1] 42705 v.reset(OpAMD64SUBL) 42706 v.AddArg(x) 42707 v.AddArg(y) 42708 return true 42709 } 42710 } 42711 func rewriteValueAMD64_OpSub32_0(v *Value) bool { 42712 // match: (Sub32 x y) 42713 // cond: 42714 // result: (SUBL x y) 42715 for { 42716 _ = v.Args[1] 42717 x := v.Args[0] 42718 y := v.Args[1] 42719 v.reset(OpAMD64SUBL) 42720 v.AddArg(x) 42721 v.AddArg(y) 42722 return true 42723 } 42724 } 42725 func rewriteValueAMD64_OpSub32F_0(v *Value) bool { 42726 // match: (Sub32F x y) 42727 // cond: 42728 // result: (SUBSS x y) 42729 for { 42730 _ = v.Args[1] 42731 x := v.Args[0] 42732 y := v.Args[1] 42733 v.reset(OpAMD64SUBSS) 42734 v.AddArg(x) 42735 v.AddArg(y) 42736 return true 42737 } 42738 } 42739 func rewriteValueAMD64_OpSub64_0(v *Value) bool { 42740 // match: (Sub64 x y) 42741 // cond: 42742 // result: (SUBQ x y) 42743 for { 42744 _ = v.Args[1] 42745 x := v.Args[0] 42746 y := v.Args[1] 42747 v.reset(OpAMD64SUBQ) 42748 v.AddArg(x) 42749 v.AddArg(y) 42750 return true 42751 } 42752 } 42753 func rewriteValueAMD64_OpSub64F_0(v *Value) bool { 42754 // match: (Sub64F x y) 42755 // cond: 42756 // result: (SUBSD x y) 42757 for { 42758 _ = v.Args[1] 42759 x := v.Args[0] 42760 y := v.Args[1] 42761 v.reset(OpAMD64SUBSD) 42762 v.AddArg(x) 42763 v.AddArg(y) 42764 return true 42765 } 42766 } 42767 func rewriteValueAMD64_OpSub8_0(v *Value) bool { 42768 // match: (Sub8 x y) 42769 // cond: 42770 // result: (SUBL x y) 42771 for { 42772 _ = v.Args[1] 42773 x := v.Args[0] 42774 y := v.Args[1] 42775 v.reset(OpAMD64SUBL) 42776 v.AddArg(x) 42777 v.AddArg(y) 42778 return true 42779 } 42780 } 42781 func rewriteValueAMD64_OpSubPtr_0(v *Value) bool { 42782 b := v.Block 42783 _ = b 42784 config := b.Func.Config 42785 _ = config 42786 // match: (SubPtr x y) 42787 // cond: config.PtrSize == 8 42788 // result: (SUBQ x y) 42789 for { 42790 _ = v.Args[1] 42791 x := v.Args[0] 42792 y := v.Args[1] 42793 if !(config.PtrSize == 8) { 42794 break 42795 } 42796 v.reset(OpAMD64SUBQ) 42797 v.AddArg(x) 42798 v.AddArg(y) 42799 return true 42800 } 42801 // match: (SubPtr x y) 42802 // cond: config.PtrSize == 4 42803 // result: (SUBL x y) 42804 for { 42805 _ = v.Args[1] 42806 x := v.Args[0] 42807 y := v.Args[1] 42808 if !(config.PtrSize == 4) { 42809 break 42810 } 42811 v.reset(OpAMD64SUBL) 42812 v.AddArg(x) 42813 v.AddArg(y) 42814 return true 42815 } 42816 return false 42817 } 42818 func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool { 42819 // match: (Trunc16to8 x) 42820 // cond: 42821 // result: x 42822 for { 42823 x := v.Args[0] 42824 v.reset(OpCopy) 42825 v.Type = x.Type 42826 v.AddArg(x) 42827 return true 42828 } 42829 } 42830 func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool { 42831 // match: (Trunc32to16 x) 42832 // cond: 42833 // result: x 42834 for { 42835 x := v.Args[0] 42836 v.reset(OpCopy) 42837 v.Type = x.Type 42838 v.AddArg(x) 42839 return true 42840 } 42841 } 42842 func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool { 42843 // match: (Trunc32to8 x) 42844 // cond: 42845 // result: x 42846 for { 42847 x := v.Args[0] 42848 v.reset(OpCopy) 42849 v.Type = x.Type 42850 v.AddArg(x) 42851 return true 42852 } 42853 } 42854 func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool { 42855 // match: (Trunc64to16 x) 42856 // cond: 42857 // result: x 42858 for { 42859 x := v.Args[0] 42860 v.reset(OpCopy) 42861 v.Type = x.Type 42862 v.AddArg(x) 42863 return true 42864 } 42865 } 42866 func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool { 
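// As with the other Trunc* rules in this file, truncation is free on
// amd64: every operation that consumes a narrow value reads only its
// low-order bits, so the rule below rewrites the truncation to a plain
// copy (OpCopy) of its operand, retyped to the narrower width. Roughly
// (an illustrative sketch, not generated code):
//
//	x := uint64(0x123456789)
//	y := uint32(x) // no instruction emitted; users of y read only the low 32 bits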
42867 // match: (Trunc64to32 x) 42868 // cond: 42869 // result: x 42870 for { 42871 x := v.Args[0] 42872 v.reset(OpCopy) 42873 v.Type = x.Type 42874 v.AddArg(x) 42875 return true 42876 } 42877 } 42878 func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool { 42879 // match: (Trunc64to8 x) 42880 // cond: 42881 // result: x 42882 for { 42883 x := v.Args[0] 42884 v.reset(OpCopy) 42885 v.Type = x.Type 42886 v.AddArg(x) 42887 return true 42888 } 42889 } 42890 func rewriteValueAMD64_OpXor16_0(v *Value) bool { 42891 // match: (Xor16 x y) 42892 // cond: 42893 // result: (XORL x y) 42894 for { 42895 _ = v.Args[1] 42896 x := v.Args[0] 42897 y := v.Args[1] 42898 v.reset(OpAMD64XORL) 42899 v.AddArg(x) 42900 v.AddArg(y) 42901 return true 42902 } 42903 } 42904 func rewriteValueAMD64_OpXor32_0(v *Value) bool { 42905 // match: (Xor32 x y) 42906 // cond: 42907 // result: (XORL x y) 42908 for { 42909 _ = v.Args[1] 42910 x := v.Args[0] 42911 y := v.Args[1] 42912 v.reset(OpAMD64XORL) 42913 v.AddArg(x) 42914 v.AddArg(y) 42915 return true 42916 } 42917 } 42918 func rewriteValueAMD64_OpXor64_0(v *Value) bool { 42919 // match: (Xor64 x y) 42920 // cond: 42921 // result: (XORQ x y) 42922 for { 42923 _ = v.Args[1] 42924 x := v.Args[0] 42925 y := v.Args[1] 42926 v.reset(OpAMD64XORQ) 42927 v.AddArg(x) 42928 v.AddArg(y) 42929 return true 42930 } 42931 } 42932 func rewriteValueAMD64_OpXor8_0(v *Value) bool { 42933 // match: (Xor8 x y) 42934 // cond: 42935 // result: (XORL x y) 42936 for { 42937 _ = v.Args[1] 42938 x := v.Args[0] 42939 y := v.Args[1] 42940 v.reset(OpAMD64XORL) 42941 v.AddArg(x) 42942 v.AddArg(y) 42943 return true 42944 } 42945 } 42946 func rewriteValueAMD64_OpZero_0(v *Value) bool { 42947 b := v.Block 42948 _ = b 42949 // match: (Zero [0] _ mem) 42950 // cond: 42951 // result: mem 42952 for { 42953 if v.AuxInt != 0 { 42954 break 42955 } 42956 _ = v.Args[1] 42957 mem := v.Args[1] 42958 v.reset(OpCopy) 42959 v.Type = mem.Type 42960 v.AddArg(mem) 42961 return true 42962 } 42963 // match: (Zero [1] destptr mem) 42964 // cond: 42965 // result: (MOVBstoreconst [0] destptr mem) 42966 for { 42967 if v.AuxInt != 1 { 42968 break 42969 } 42970 _ = v.Args[1] 42971 destptr := v.Args[0] 42972 mem := v.Args[1] 42973 v.reset(OpAMD64MOVBstoreconst) 42974 v.AuxInt = 0 42975 v.AddArg(destptr) 42976 v.AddArg(mem) 42977 return true 42978 } 42979 // match: (Zero [2] destptr mem) 42980 // cond: 42981 // result: (MOVWstoreconst [0] destptr mem) 42982 for { 42983 if v.AuxInt != 2 { 42984 break 42985 } 42986 _ = v.Args[1] 42987 destptr := v.Args[0] 42988 mem := v.Args[1] 42989 v.reset(OpAMD64MOVWstoreconst) 42990 v.AuxInt = 0 42991 v.AddArg(destptr) 42992 v.AddArg(mem) 42993 return true 42994 } 42995 // match: (Zero [4] destptr mem) 42996 // cond: 42997 // result: (MOVLstoreconst [0] destptr mem) 42998 for { 42999 if v.AuxInt != 4 { 43000 break 43001 } 43002 _ = v.Args[1] 43003 destptr := v.Args[0] 43004 mem := v.Args[1] 43005 v.reset(OpAMD64MOVLstoreconst) 43006 v.AuxInt = 0 43007 v.AddArg(destptr) 43008 v.AddArg(mem) 43009 return true 43010 } 43011 // match: (Zero [8] destptr mem) 43012 // cond: 43013 // result: (MOVQstoreconst [0] destptr mem) 43014 for { 43015 if v.AuxInt != 8 { 43016 break 43017 } 43018 _ = v.Args[1] 43019 destptr := v.Args[0] 43020 mem := v.Args[1] 43021 v.reset(OpAMD64MOVQstoreconst) 43022 v.AuxInt = 0 43023 v.AddArg(destptr) 43024 v.AddArg(mem) 43025 return true 43026 } 43027 // match: (Zero [3] destptr mem) 43028 // cond: 43029 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] 
destptr mem)) 43030 for { 43031 if v.AuxInt != 3 { 43032 break 43033 } 43034 _ = v.Args[1] 43035 destptr := v.Args[0] 43036 mem := v.Args[1] 43037 v.reset(OpAMD64MOVBstoreconst) 43038 v.AuxInt = makeValAndOff(0, 2) 43039 v.AddArg(destptr) 43040 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem) 43041 v0.AuxInt = 0 43042 v0.AddArg(destptr) 43043 v0.AddArg(mem) 43044 v.AddArg(v0) 43045 return true 43046 } 43047 // match: (Zero [5] destptr mem) 43048 // cond: 43049 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 43050 for { 43051 if v.AuxInt != 5 { 43052 break 43053 } 43054 _ = v.Args[1] 43055 destptr := v.Args[0] 43056 mem := v.Args[1] 43057 v.reset(OpAMD64MOVBstoreconst) 43058 v.AuxInt = makeValAndOff(0, 4) 43059 v.AddArg(destptr) 43060 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 43061 v0.AuxInt = 0 43062 v0.AddArg(destptr) 43063 v0.AddArg(mem) 43064 v.AddArg(v0) 43065 return true 43066 } 43067 // match: (Zero [6] destptr mem) 43068 // cond: 43069 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 43070 for { 43071 if v.AuxInt != 6 { 43072 break 43073 } 43074 _ = v.Args[1] 43075 destptr := v.Args[0] 43076 mem := v.Args[1] 43077 v.reset(OpAMD64MOVWstoreconst) 43078 v.AuxInt = makeValAndOff(0, 4) 43079 v.AddArg(destptr) 43080 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 43081 v0.AuxInt = 0 43082 v0.AddArg(destptr) 43083 v0.AddArg(mem) 43084 v.AddArg(v0) 43085 return true 43086 } 43087 // match: (Zero [7] destptr mem) 43088 // cond: 43089 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 43090 for { 43091 if v.AuxInt != 7 { 43092 break 43093 } 43094 _ = v.Args[1] 43095 destptr := v.Args[0] 43096 mem := v.Args[1] 43097 v.reset(OpAMD64MOVLstoreconst) 43098 v.AuxInt = makeValAndOff(0, 3) 43099 v.AddArg(destptr) 43100 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 43101 v0.AuxInt = 0 43102 v0.AddArg(destptr) 43103 v0.AddArg(mem) 43104 v.AddArg(v0) 43105 return true 43106 } 43107 // match: (Zero [s] destptr mem) 43108 // cond: s%8 != 0 && s > 8 43109 // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem)) 43110 for { 43111 s := v.AuxInt 43112 _ = v.Args[1] 43113 destptr := v.Args[0] 43114 mem := v.Args[1] 43115 if !(s%8 != 0 && s > 8) { 43116 break 43117 } 43118 v.reset(OpZero) 43119 v.AuxInt = s - s%8 43120 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43121 v0.AuxInt = s % 8 43122 v0.AddArg(destptr) 43123 v.AddArg(v0) 43124 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 43125 v1.AuxInt = 0 43126 v1.AddArg(destptr) 43127 v1.AddArg(mem) 43128 v.AddArg(v1) 43129 return true 43130 } 43131 return false 43132 } 43133 func rewriteValueAMD64_OpZero_10(v *Value) bool { 43134 b := v.Block 43135 _ = b 43136 config := b.Func.Config 43137 _ = config 43138 typ := &b.Func.Config.Types 43139 _ = typ 43140 // match: (Zero [16] destptr mem) 43141 // cond: 43142 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 43143 for { 43144 if v.AuxInt != 16 { 43145 break 43146 } 43147 _ = v.Args[1] 43148 destptr := v.Args[0] 43149 mem := v.Args[1] 43150 v.reset(OpAMD64MOVQstoreconst) 43151 v.AuxInt = makeValAndOff(0, 8) 43152 v.AddArg(destptr) 43153 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 43154 v0.AuxInt = 0 43155 v0.AddArg(destptr) 43156 v0.AddArg(mem) 43157 v.AddArg(v0) 43158 return true 43159 } 43160 // match: (Zero 
[24] destptr mem) 43161 // cond: 43162 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 43163 for { 43164 if v.AuxInt != 24 { 43165 break 43166 } 43167 _ = v.Args[1] 43168 destptr := v.Args[0] 43169 mem := v.Args[1] 43170 v.reset(OpAMD64MOVQstoreconst) 43171 v.AuxInt = makeValAndOff(0, 16) 43172 v.AddArg(destptr) 43173 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 43174 v0.AuxInt = makeValAndOff(0, 8) 43175 v0.AddArg(destptr) 43176 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 43177 v1.AuxInt = 0 43178 v1.AddArg(destptr) 43179 v1.AddArg(mem) 43180 v0.AddArg(v1) 43181 v.AddArg(v0) 43182 return true 43183 } 43184 // match: (Zero [32] destptr mem) 43185 // cond: 43186 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 43187 for { 43188 if v.AuxInt != 32 { 43189 break 43190 } 43191 _ = v.Args[1] 43192 destptr := v.Args[0] 43193 mem := v.Args[1] 43194 v.reset(OpAMD64MOVQstoreconst) 43195 v.AuxInt = makeValAndOff(0, 24) 43196 v.AddArg(destptr) 43197 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 43198 v0.AuxInt = makeValAndOff(0, 16) 43199 v0.AddArg(destptr) 43200 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 43201 v1.AuxInt = makeValAndOff(0, 8) 43202 v1.AddArg(destptr) 43203 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 43204 v2.AuxInt = 0 43205 v2.AddArg(destptr) 43206 v2.AddArg(mem) 43207 v1.AddArg(v2) 43208 v0.AddArg(v1) 43209 v.AddArg(v0) 43210 return true 43211 } 43212 // match: (Zero [s] destptr mem) 43213 // cond: s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice 43214 // result: (Zero [s-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) 43215 for { 43216 s := v.AuxInt 43217 _ = v.Args[1] 43218 destptr := v.Args[0] 43219 mem := v.Args[1] 43220 if !(s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice) { 43221 break 43222 } 43223 v.reset(OpZero) 43224 v.AuxInt = s - 8 43225 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 43226 v0.AuxInt = 8 43227 v0.AddArg(destptr) 43228 v.AddArg(v0) 43229 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 43230 v1.AddArg(destptr) 43231 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 43232 v2.AuxInt = 0 43233 v1.AddArg(v2) 43234 v1.AddArg(mem) 43235 v.AddArg(v1) 43236 return true 43237 } 43238 // match: (Zero [s] destptr mem) 43239 // cond: s <= 1024 && s%16 == 0 && !config.noDuffDevice 43240 // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem) 43241 for { 43242 s := v.AuxInt 43243 _ = v.Args[1] 43244 destptr := v.Args[0] 43245 mem := v.Args[1] 43246 if !(s <= 1024 && s%16 == 0 && !config.noDuffDevice) { 43247 break 43248 } 43249 v.reset(OpAMD64DUFFZERO) 43250 v.AuxInt = s 43251 v.AddArg(destptr) 43252 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 43253 v0.AuxInt = 0 43254 v.AddArg(v0) 43255 v.AddArg(mem) 43256 return true 43257 } 43258 // match: (Zero [s] destptr mem) 43259 // cond: (s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0 43260 // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) 43261 for { 43262 s := v.AuxInt 43263 _ = v.Args[1] 43264 destptr := v.Args[0] 43265 mem := v.Args[1] 43266 if !((s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0) { 43267 break 43268 } 43269 v.reset(OpAMD64REPSTOSQ) 43270 v.AddArg(destptr) 43271 v0 := 
b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 43272 v0.AuxInt = s / 8 43273 v.AddArg(v0) 43274 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 43275 v1.AuxInt = 0 43276 v.AddArg(v1) 43277 v.AddArg(mem) 43278 return true 43279 } 43280 return false 43281 } 43282 func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool { 43283 // match: (ZeroExt16to32 x) 43284 // cond: 43285 // result: (MOVWQZX x) 43286 for { 43287 x := v.Args[0] 43288 v.reset(OpAMD64MOVWQZX) 43289 v.AddArg(x) 43290 return true 43291 } 43292 } 43293 func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool { 43294 // match: (ZeroExt16to64 x) 43295 // cond: 43296 // result: (MOVWQZX x) 43297 for { 43298 x := v.Args[0] 43299 v.reset(OpAMD64MOVWQZX) 43300 v.AddArg(x) 43301 return true 43302 } 43303 } 43304 func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool { 43305 // match: (ZeroExt32to64 x) 43306 // cond: 43307 // result: (MOVLQZX x) 43308 for { 43309 x := v.Args[0] 43310 v.reset(OpAMD64MOVLQZX) 43311 v.AddArg(x) 43312 return true 43313 } 43314 } 43315 func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool { 43316 // match: (ZeroExt8to16 x) 43317 // cond: 43318 // result: (MOVBQZX x) 43319 for { 43320 x := v.Args[0] 43321 v.reset(OpAMD64MOVBQZX) 43322 v.AddArg(x) 43323 return true 43324 } 43325 } 43326 func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool { 43327 // match: (ZeroExt8to32 x) 43328 // cond: 43329 // result: (MOVBQZX x) 43330 for { 43331 x := v.Args[0] 43332 v.reset(OpAMD64MOVBQZX) 43333 v.AddArg(x) 43334 return true 43335 } 43336 } 43337 func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool { 43338 // match: (ZeroExt8to64 x) 43339 // cond: 43340 // result: (MOVBQZX x) 43341 for { 43342 x := v.Args[0] 43343 v.reset(OpAMD64MOVBQZX) 43344 v.AddArg(x) 43345 return true 43346 } 43347 } 43348 func rewriteBlockAMD64(b *Block) bool { 43349 config := b.Func.Config 43350 _ = config 43351 fe := b.Func.fe 43352 _ = fe 43353 typ := &config.Types 43354 _ = typ 43355 switch b.Kind { 43356 case BlockAMD64EQ: 43357 // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) 43358 // cond: !config.nacl 43359 // result: (UGE (BTL x y)) 43360 for { 43361 v := b.Control 43362 if v.Op != OpAMD64TESTL { 43363 break 43364 } 43365 _ = v.Args[1] 43366 v_0 := v.Args[0] 43367 if v_0.Op != OpAMD64SHLL { 43368 break 43369 } 43370 _ = v_0.Args[1] 43371 v_0_0 := v_0.Args[0] 43372 if v_0_0.Op != OpAMD64MOVLconst { 43373 break 43374 } 43375 if v_0_0.AuxInt != 1 { 43376 break 43377 } 43378 x := v_0.Args[1] 43379 y := v.Args[1] 43380 if !(!config.nacl) { 43381 break 43382 } 43383 b.Kind = BlockAMD64UGE 43384 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 43385 v0.AddArg(x) 43386 v0.AddArg(y) 43387 b.SetControl(v0) 43388 return true 43389 } 43390 // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) 43391 // cond: !config.nacl 43392 // result: (UGE (BTL x y)) 43393 for { 43394 v := b.Control 43395 if v.Op != OpAMD64TESTL { 43396 break 43397 } 43398 _ = v.Args[1] 43399 y := v.Args[0] 43400 v_1 := v.Args[1] 43401 if v_1.Op != OpAMD64SHLL { 43402 break 43403 } 43404 _ = v_1.Args[1] 43405 v_1_0 := v_1.Args[0] 43406 if v_1_0.Op != OpAMD64MOVLconst { 43407 break 43408 } 43409 if v_1_0.AuxInt != 1 { 43410 break 43411 } 43412 x := v_1.Args[1] 43413 if !(!config.nacl) { 43414 break 43415 } 43416 b.Kind = BlockAMD64UGE 43417 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 43418 v0.AddArg(x) 43419 v0.AddArg(y) 43420 b.SetControl(v0) 43421 return true 43422 } 43423 // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 43424 // cond: !config.nacl 43425 
// result: (UGE (BTQ x y)) 43426 for { 43427 v := b.Control 43428 if v.Op != OpAMD64TESTQ { 43429 break 43430 } 43431 _ = v.Args[1] 43432 v_0 := v.Args[0] 43433 if v_0.Op != OpAMD64SHLQ { 43434 break 43435 } 43436 _ = v_0.Args[1] 43437 v_0_0 := v_0.Args[0] 43438 if v_0_0.Op != OpAMD64MOVQconst { 43439 break 43440 } 43441 if v_0_0.AuxInt != 1 { 43442 break 43443 } 43444 x := v_0.Args[1] 43445 y := v.Args[1] 43446 if !(!config.nacl) { 43447 break 43448 } 43449 b.Kind = BlockAMD64UGE 43450 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 43451 v0.AddArg(x) 43452 v0.AddArg(y) 43453 b.SetControl(v0) 43454 return true 43455 } 43456 // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 43457 // cond: !config.nacl 43458 // result: (UGE (BTQ x y)) 43459 for { 43460 v := b.Control 43461 if v.Op != OpAMD64TESTQ { 43462 break 43463 } 43464 _ = v.Args[1] 43465 y := v.Args[0] 43466 v_1 := v.Args[1] 43467 if v_1.Op != OpAMD64SHLQ { 43468 break 43469 } 43470 _ = v_1.Args[1] 43471 v_1_0 := v_1.Args[0] 43472 if v_1_0.Op != OpAMD64MOVQconst { 43473 break 43474 } 43475 if v_1_0.AuxInt != 1 { 43476 break 43477 } 43478 x := v_1.Args[1] 43479 if !(!config.nacl) { 43480 break 43481 } 43482 b.Kind = BlockAMD64UGE 43483 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 43484 v0.AddArg(x) 43485 v0.AddArg(y) 43486 b.SetControl(v0) 43487 return true 43488 } 43489 // match: (EQ (TESTLconst [c] x)) 43490 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 43491 // result: (UGE (BTLconst [log2(c)] x)) 43492 for { 43493 v := b.Control 43494 if v.Op != OpAMD64TESTLconst { 43495 break 43496 } 43497 c := v.AuxInt 43498 x := v.Args[0] 43499 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 43500 break 43501 } 43502 b.Kind = BlockAMD64UGE 43503 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 43504 v0.AuxInt = log2(c) 43505 v0.AddArg(x) 43506 b.SetControl(v0) 43507 return true 43508 } 43509 // match: (EQ (TESTQconst [c] x)) 43510 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43511 // result: (UGE (BTQconst [log2(c)] x)) 43512 for { 43513 v := b.Control 43514 if v.Op != OpAMD64TESTQconst { 43515 break 43516 } 43517 c := v.AuxInt 43518 x := v.Args[0] 43519 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 43520 break 43521 } 43522 b.Kind = BlockAMD64UGE 43523 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43524 v0.AuxInt = log2(c) 43525 v0.AddArg(x) 43526 b.SetControl(v0) 43527 return true 43528 } 43529 // match: (EQ (TESTQ (MOVQconst [c]) x)) 43530 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43531 // result: (UGE (BTQconst [log2(c)] x)) 43532 for { 43533 v := b.Control 43534 if v.Op != OpAMD64TESTQ { 43535 break 43536 } 43537 _ = v.Args[1] 43538 v_0 := v.Args[0] 43539 if v_0.Op != OpAMD64MOVQconst { 43540 break 43541 } 43542 c := v_0.AuxInt 43543 x := v.Args[1] 43544 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 43545 break 43546 } 43547 b.Kind = BlockAMD64UGE 43548 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43549 v0.AuxInt = log2(c) 43550 v0.AddArg(x) 43551 b.SetControl(v0) 43552 return true 43553 } 43554 // match: (EQ (TESTQ x (MOVQconst [c]))) 43555 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43556 // result: (UGE (BTQconst [log2(c)] x)) 43557 for { 43558 v := b.Control 43559 if v.Op != OpAMD64TESTQ { 43560 break 43561 } 43562 _ = v.Args[1] 43563 x := v.Args[0] 43564 v_1 := v.Args[1] 43565 if v_1.Op != OpAMD64MOVQconst { 43566 break 43567 } 43568 c := v_1.AuxInt 43569 if !(isPowerOfTwo(c) && log2(c) < 64 && 
!config.nacl) { 43570 break 43571 } 43572 b.Kind = BlockAMD64UGE 43573 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43574 v0.AuxInt = log2(c) 43575 v0.AddArg(x) 43576 b.SetControl(v0) 43577 return true 43578 } 43579 // match: (EQ (InvertFlags cmp) yes no) 43580 // cond: 43581 // result: (EQ cmp yes no) 43582 for { 43583 v := b.Control 43584 if v.Op != OpAMD64InvertFlags { 43585 break 43586 } 43587 cmp := v.Args[0] 43588 b.Kind = BlockAMD64EQ 43589 b.SetControl(cmp) 43590 return true 43591 } 43592 // match: (EQ (FlagEQ) yes no) 43593 // cond: 43594 // result: (First nil yes no) 43595 for { 43596 v := b.Control 43597 if v.Op != OpAMD64FlagEQ { 43598 break 43599 } 43600 b.Kind = BlockFirst 43601 b.SetControl(nil) 43602 return true 43603 } 43604 // match: (EQ (FlagLT_ULT) yes no) 43605 // cond: 43606 // result: (First nil no yes) 43607 for { 43608 v := b.Control 43609 if v.Op != OpAMD64FlagLT_ULT { 43610 break 43611 } 43612 b.Kind = BlockFirst 43613 b.SetControl(nil) 43614 b.swapSuccessors() 43615 return true 43616 } 43617 // match: (EQ (FlagLT_UGT) yes no) 43618 // cond: 43619 // result: (First nil no yes) 43620 for { 43621 v := b.Control 43622 if v.Op != OpAMD64FlagLT_UGT { 43623 break 43624 } 43625 b.Kind = BlockFirst 43626 b.SetControl(nil) 43627 b.swapSuccessors() 43628 return true 43629 } 43630 // match: (EQ (FlagGT_ULT) yes no) 43631 // cond: 43632 // result: (First nil no yes) 43633 for { 43634 v := b.Control 43635 if v.Op != OpAMD64FlagGT_ULT { 43636 break 43637 } 43638 b.Kind = BlockFirst 43639 b.SetControl(nil) 43640 b.swapSuccessors() 43641 return true 43642 } 43643 // match: (EQ (FlagGT_UGT) yes no) 43644 // cond: 43645 // result: (First nil no yes) 43646 for { 43647 v := b.Control 43648 if v.Op != OpAMD64FlagGT_UGT { 43649 break 43650 } 43651 b.Kind = BlockFirst 43652 b.SetControl(nil) 43653 b.swapSuccessors() 43654 return true 43655 } 43656 case BlockAMD64GE: 43657 // match: (GE (InvertFlags cmp) yes no) 43658 // cond: 43659 // result: (LE cmp yes no) 43660 for { 43661 v := b.Control 43662 if v.Op != OpAMD64InvertFlags { 43663 break 43664 } 43665 cmp := v.Args[0] 43666 b.Kind = BlockAMD64LE 43667 b.SetControl(cmp) 43668 return true 43669 } 43670 // match: (GE (FlagEQ) yes no) 43671 // cond: 43672 // result: (First nil yes no) 43673 for { 43674 v := b.Control 43675 if v.Op != OpAMD64FlagEQ { 43676 break 43677 } 43678 b.Kind = BlockFirst 43679 b.SetControl(nil) 43680 return true 43681 } 43682 // match: (GE (FlagLT_ULT) yes no) 43683 // cond: 43684 // result: (First nil no yes) 43685 for { 43686 v := b.Control 43687 if v.Op != OpAMD64FlagLT_ULT { 43688 break 43689 } 43690 b.Kind = BlockFirst 43691 b.SetControl(nil) 43692 b.swapSuccessors() 43693 return true 43694 } 43695 // match: (GE (FlagLT_UGT) yes no) 43696 // cond: 43697 // result: (First nil no yes) 43698 for { 43699 v := b.Control 43700 if v.Op != OpAMD64FlagLT_UGT { 43701 break 43702 } 43703 b.Kind = BlockFirst 43704 b.SetControl(nil) 43705 b.swapSuccessors() 43706 return true 43707 } 43708 // match: (GE (FlagGT_ULT) yes no) 43709 // cond: 43710 // result: (First nil yes no) 43711 for { 43712 v := b.Control 43713 if v.Op != OpAMD64FlagGT_ULT { 43714 break 43715 } 43716 b.Kind = BlockFirst 43717 b.SetControl(nil) 43718 return true 43719 } 43720 // match: (GE (FlagGT_UGT) yes no) 43721 // cond: 43722 // result: (First nil yes no) 43723 for { 43724 v := b.Control 43725 if v.Op != OpAMD64FlagGT_UGT { 43726 break 43727 } 43728 b.Kind = BlockFirst 43729 b.SetControl(nil) 43730 return true 43731 } 43732 case 
BlockAMD64GT: 43733 // match: (GT (InvertFlags cmp) yes no) 43734 // cond: 43735 // result: (LT cmp yes no) 43736 for { 43737 v := b.Control 43738 if v.Op != OpAMD64InvertFlags { 43739 break 43740 } 43741 cmp := v.Args[0] 43742 b.Kind = BlockAMD64LT 43743 b.SetControl(cmp) 43744 return true 43745 } 43746 // match: (GT (FlagEQ) yes no) 43747 // cond: 43748 // result: (First nil no yes) 43749 for { 43750 v := b.Control 43751 if v.Op != OpAMD64FlagEQ { 43752 break 43753 } 43754 b.Kind = BlockFirst 43755 b.SetControl(nil) 43756 b.swapSuccessors() 43757 return true 43758 } 43759 // match: (GT (FlagLT_ULT) yes no) 43760 // cond: 43761 // result: (First nil no yes) 43762 for { 43763 v := b.Control 43764 if v.Op != OpAMD64FlagLT_ULT { 43765 break 43766 } 43767 b.Kind = BlockFirst 43768 b.SetControl(nil) 43769 b.swapSuccessors() 43770 return true 43771 } 43772 // match: (GT (FlagLT_UGT) yes no) 43773 // cond: 43774 // result: (First nil no yes) 43775 for { 43776 v := b.Control 43777 if v.Op != OpAMD64FlagLT_UGT { 43778 break 43779 } 43780 b.Kind = BlockFirst 43781 b.SetControl(nil) 43782 b.swapSuccessors() 43783 return true 43784 } 43785 // match: (GT (FlagGT_ULT) yes no) 43786 // cond: 43787 // result: (First nil yes no) 43788 for { 43789 v := b.Control 43790 if v.Op != OpAMD64FlagGT_ULT { 43791 break 43792 } 43793 b.Kind = BlockFirst 43794 b.SetControl(nil) 43795 return true 43796 } 43797 // match: (GT (FlagGT_UGT) yes no) 43798 // cond: 43799 // result: (First nil yes no) 43800 for { 43801 v := b.Control 43802 if v.Op != OpAMD64FlagGT_UGT { 43803 break 43804 } 43805 b.Kind = BlockFirst 43806 b.SetControl(nil) 43807 return true 43808 } 43809 case BlockIf: 43810 // match: (If (SETL cmp) yes no) 43811 // cond: 43812 // result: (LT cmp yes no) 43813 for { 43814 v := b.Control 43815 if v.Op != OpAMD64SETL { 43816 break 43817 } 43818 cmp := v.Args[0] 43819 b.Kind = BlockAMD64LT 43820 b.SetControl(cmp) 43821 return true 43822 } 43823 // match: (If (SETLE cmp) yes no) 43824 // cond: 43825 // result: (LE cmp yes no) 43826 for { 43827 v := b.Control 43828 if v.Op != OpAMD64SETLE { 43829 break 43830 } 43831 cmp := v.Args[0] 43832 b.Kind = BlockAMD64LE 43833 b.SetControl(cmp) 43834 return true 43835 } 43836 // match: (If (SETG cmp) yes no) 43837 // cond: 43838 // result: (GT cmp yes no) 43839 for { 43840 v := b.Control 43841 if v.Op != OpAMD64SETG { 43842 break 43843 } 43844 cmp := v.Args[0] 43845 b.Kind = BlockAMD64GT 43846 b.SetControl(cmp) 43847 return true 43848 } 43849 // match: (If (SETGE cmp) yes no) 43850 // cond: 43851 // result: (GE cmp yes no) 43852 for { 43853 v := b.Control 43854 if v.Op != OpAMD64SETGE { 43855 break 43856 } 43857 cmp := v.Args[0] 43858 b.Kind = BlockAMD64GE 43859 b.SetControl(cmp) 43860 return true 43861 } 43862 // match: (If (SETEQ cmp) yes no) 43863 // cond: 43864 // result: (EQ cmp yes no) 43865 for { 43866 v := b.Control 43867 if v.Op != OpAMD64SETEQ { 43868 break 43869 } 43870 cmp := v.Args[0] 43871 b.Kind = BlockAMD64EQ 43872 b.SetControl(cmp) 43873 return true 43874 } 43875 // match: (If (SETNE cmp) yes no) 43876 // cond: 43877 // result: (NE cmp yes no) 43878 for { 43879 v := b.Control 43880 if v.Op != OpAMD64SETNE { 43881 break 43882 } 43883 cmp := v.Args[0] 43884 b.Kind = BlockAMD64NE 43885 b.SetControl(cmp) 43886 return true 43887 } 43888 // match: (If (SETB cmp) yes no) 43889 // cond: 43890 // result: (ULT cmp yes no) 43891 for { 43892 v := b.Control 43893 if v.Op != OpAMD64SETB { 43894 break 43895 } 43896 cmp := v.Args[0] 43897 b.Kind = BlockAMD64ULT 43898 
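// Rewriting a block takes only these two steps: update its Kind and
// reset its control value. The yes/no successors keep their order, so
// (If (SETB cmp)) becomes a ULT block branching directly on the flags
// produced by cmp, and the intermediate SETB goes dead (if otherwise
// unused).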
b.SetControl(cmp) 43899 return true 43900 } 43901 // match: (If (SETBE cmp) yes no) 43902 // cond: 43903 // result: (ULE cmp yes no) 43904 for { 43905 v := b.Control 43906 if v.Op != OpAMD64SETBE { 43907 break 43908 } 43909 cmp := v.Args[0] 43910 b.Kind = BlockAMD64ULE 43911 b.SetControl(cmp) 43912 return true 43913 } 43914 // match: (If (SETA cmp) yes no) 43915 // cond: 43916 // result: (UGT cmp yes no) 43917 for { 43918 v := b.Control 43919 if v.Op != OpAMD64SETA { 43920 break 43921 } 43922 cmp := v.Args[0] 43923 b.Kind = BlockAMD64UGT 43924 b.SetControl(cmp) 43925 return true 43926 } 43927 // match: (If (SETAE cmp) yes no) 43928 // cond: 43929 // result: (UGE cmp yes no) 43930 for { 43931 v := b.Control 43932 if v.Op != OpAMD64SETAE { 43933 break 43934 } 43935 cmp := v.Args[0] 43936 b.Kind = BlockAMD64UGE 43937 b.SetControl(cmp) 43938 return true 43939 } 43940 // match: (If (SETGF cmp) yes no) 43941 // cond: 43942 // result: (UGT cmp yes no) 43943 for { 43944 v := b.Control 43945 if v.Op != OpAMD64SETGF { 43946 break 43947 } 43948 cmp := v.Args[0] 43949 b.Kind = BlockAMD64UGT 43950 b.SetControl(cmp) 43951 return true 43952 } 43953 // match: (If (SETGEF cmp) yes no) 43954 // cond: 43955 // result: (UGE cmp yes no) 43956 for { 43957 v := b.Control 43958 if v.Op != OpAMD64SETGEF { 43959 break 43960 } 43961 cmp := v.Args[0] 43962 b.Kind = BlockAMD64UGE 43963 b.SetControl(cmp) 43964 return true 43965 } 43966 // match: (If (SETEQF cmp) yes no) 43967 // cond: 43968 // result: (EQF cmp yes no) 43969 for { 43970 v := b.Control 43971 if v.Op != OpAMD64SETEQF { 43972 break 43973 } 43974 cmp := v.Args[0] 43975 b.Kind = BlockAMD64EQF 43976 b.SetControl(cmp) 43977 return true 43978 } 43979 // match: (If (SETNEF cmp) yes no) 43980 // cond: 43981 // result: (NEF cmp yes no) 43982 for { 43983 v := b.Control 43984 if v.Op != OpAMD64SETNEF { 43985 break 43986 } 43987 cmp := v.Args[0] 43988 b.Kind = BlockAMD64NEF 43989 b.SetControl(cmp) 43990 return true 43991 } 43992 // match: (If cond yes no) 43993 // cond: 43994 // result: (NE (TESTB cond cond) yes no) 43995 for { 43996 v := b.Control 43997 _ = v 43998 cond := b.Control 43999 b.Kind = BlockAMD64NE 44000 v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags) 44001 v0.AddArg(cond) 44002 v0.AddArg(cond) 44003 b.SetControl(v0) 44004 return true 44005 } 44006 case BlockAMD64LE: 44007 // match: (LE (InvertFlags cmp) yes no) 44008 // cond: 44009 // result: (GE cmp yes no) 44010 for { 44011 v := b.Control 44012 if v.Op != OpAMD64InvertFlags { 44013 break 44014 } 44015 cmp := v.Args[0] 44016 b.Kind = BlockAMD64GE 44017 b.SetControl(cmp) 44018 return true 44019 } 44020 // match: (LE (FlagEQ) yes no) 44021 // cond: 44022 // result: (First nil yes no) 44023 for { 44024 v := b.Control 44025 if v.Op != OpAMD64FlagEQ { 44026 break 44027 } 44028 b.Kind = BlockFirst 44029 b.SetControl(nil) 44030 return true 44031 } 44032 // match: (LE (FlagLT_ULT) yes no) 44033 // cond: 44034 // result: (First nil yes no) 44035 for { 44036 v := b.Control 44037 if v.Op != OpAMD64FlagLT_ULT { 44038 break 44039 } 44040 b.Kind = BlockFirst 44041 b.SetControl(nil) 44042 return true 44043 } 44044 // match: (LE (FlagLT_UGT) yes no) 44045 // cond: 44046 // result: (First nil yes no) 44047 for { 44048 v := b.Control 44049 if v.Op != OpAMD64FlagLT_UGT { 44050 break 44051 } 44052 b.Kind = BlockFirst 44053 b.SetControl(nil) 44054 return true 44055 } 44056 // match: (LE (FlagGT_ULT) yes no) 44057 // cond: 44058 // result: (First nil no yes) 44059 for { 44060 v := b.Control 44061 if v.Op != 
OpAMD64FlagGT_ULT { 44062 break 44063 } 44064 b.Kind = BlockFirst 44065 b.SetControl(nil) 44066 b.swapSuccessors() 44067 return true 44068 } 44069 // match: (LE (FlagGT_UGT) yes no) 44070 // cond: 44071 // result: (First nil no yes) 44072 for { 44073 v := b.Control 44074 if v.Op != OpAMD64FlagGT_UGT { 44075 break 44076 } 44077 b.Kind = BlockFirst 44078 b.SetControl(nil) 44079 b.swapSuccessors() 44080 return true 44081 } 44082 case BlockAMD64LT: 44083 // match: (LT (InvertFlags cmp) yes no) 44084 // cond: 44085 // result: (GT cmp yes no) 44086 for { 44087 v := b.Control 44088 if v.Op != OpAMD64InvertFlags { 44089 break 44090 } 44091 cmp := v.Args[0] 44092 b.Kind = BlockAMD64GT 44093 b.SetControl(cmp) 44094 return true 44095 } 44096 // match: (LT (FlagEQ) yes no) 44097 // cond: 44098 // result: (First nil no yes) 44099 for { 44100 v := b.Control 44101 if v.Op != OpAMD64FlagEQ { 44102 break 44103 } 44104 b.Kind = BlockFirst 44105 b.SetControl(nil) 44106 b.swapSuccessors() 44107 return true 44108 } 44109 // match: (LT (FlagLT_ULT) yes no) 44110 // cond: 44111 // result: (First nil yes no) 44112 for { 44113 v := b.Control 44114 if v.Op != OpAMD64FlagLT_ULT { 44115 break 44116 } 44117 b.Kind = BlockFirst 44118 b.SetControl(nil) 44119 return true 44120 } 44121 // match: (LT (FlagLT_UGT) yes no) 44122 // cond: 44123 // result: (First nil yes no) 44124 for { 44125 v := b.Control 44126 if v.Op != OpAMD64FlagLT_UGT { 44127 break 44128 } 44129 b.Kind = BlockFirst 44130 b.SetControl(nil) 44131 return true 44132 } 44133 // match: (LT (FlagGT_ULT) yes no) 44134 // cond: 44135 // result: (First nil no yes) 44136 for { 44137 v := b.Control 44138 if v.Op != OpAMD64FlagGT_ULT { 44139 break 44140 } 44141 b.Kind = BlockFirst 44142 b.SetControl(nil) 44143 b.swapSuccessors() 44144 return true 44145 } 44146 // match: (LT (FlagGT_UGT) yes no) 44147 // cond: 44148 // result: (First nil no yes) 44149 for { 44150 v := b.Control 44151 if v.Op != OpAMD64FlagGT_UGT { 44152 break 44153 } 44154 b.Kind = BlockFirst 44155 b.SetControl(nil) 44156 b.swapSuccessors() 44157 return true 44158 } 44159 case BlockAMD64NE: 44160 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 44161 // cond: 44162 // result: (LT cmp yes no) 44163 for { 44164 v := b.Control 44165 if v.Op != OpAMD64TESTB { 44166 break 44167 } 44168 _ = v.Args[1] 44169 v_0 := v.Args[0] 44170 if v_0.Op != OpAMD64SETL { 44171 break 44172 } 44173 cmp := v_0.Args[0] 44174 v_1 := v.Args[1] 44175 if v_1.Op != OpAMD64SETL { 44176 break 44177 } 44178 if cmp != v_1.Args[0] { 44179 break 44180 } 44181 b.Kind = BlockAMD64LT 44182 b.SetControl(cmp) 44183 return true 44184 } 44185 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 44186 // cond: 44187 // result: (LT cmp yes no) 44188 for { 44189 v := b.Control 44190 if v.Op != OpAMD64TESTB { 44191 break 44192 } 44193 _ = v.Args[1] 44194 v_0 := v.Args[0] 44195 if v_0.Op != OpAMD64SETL { 44196 break 44197 } 44198 cmp := v_0.Args[0] 44199 v_1 := v.Args[1] 44200 if v_1.Op != OpAMD64SETL { 44201 break 44202 } 44203 if cmp != v_1.Args[0] { 44204 break 44205 } 44206 b.Kind = BlockAMD64LT 44207 b.SetControl(cmp) 44208 return true 44209 } 44210 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 44211 // cond: 44212 // result: (LE cmp yes no) 44213 for { 44214 v := b.Control 44215 if v.Op != OpAMD64TESTB { 44216 break 44217 } 44218 _ = v.Args[1] 44219 v_0 := v.Args[0] 44220 if v_0.Op != OpAMD64SETLE { 44221 break 44222 } 44223 cmp := v_0.Args[0] 44224 v_1 := v.Args[1] 44225 if v_1.Op != OpAMD64SETLE { 44226 break 44227 } 44228 if cmp 
!= v_1.Args[0] { 44229 break 44230 } 44231 b.Kind = BlockAMD64LE 44232 b.SetControl(cmp) 44233 return true 44234 } 44235 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 44236 // cond: 44237 // result: (LE cmp yes no) 44238 for { 44239 v := b.Control 44240 if v.Op != OpAMD64TESTB { 44241 break 44242 } 44243 _ = v.Args[1] 44244 v_0 := v.Args[0] 44245 if v_0.Op != OpAMD64SETLE { 44246 break 44247 } 44248 cmp := v_0.Args[0] 44249 v_1 := v.Args[1] 44250 if v_1.Op != OpAMD64SETLE { 44251 break 44252 } 44253 if cmp != v_1.Args[0] { 44254 break 44255 } 44256 b.Kind = BlockAMD64LE 44257 b.SetControl(cmp) 44258 return true 44259 } 44260 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 44261 // cond: 44262 // result: (GT cmp yes no) 44263 for { 44264 v := b.Control 44265 if v.Op != OpAMD64TESTB { 44266 break 44267 } 44268 _ = v.Args[1] 44269 v_0 := v.Args[0] 44270 if v_0.Op != OpAMD64SETG { 44271 break 44272 } 44273 cmp := v_0.Args[0] 44274 v_1 := v.Args[1] 44275 if v_1.Op != OpAMD64SETG { 44276 break 44277 } 44278 if cmp != v_1.Args[0] { 44279 break 44280 } 44281 b.Kind = BlockAMD64GT 44282 b.SetControl(cmp) 44283 return true 44284 } 44285 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 44286 // cond: 44287 // result: (GT cmp yes no) 44288 for { 44289 v := b.Control 44290 if v.Op != OpAMD64TESTB { 44291 break 44292 } 44293 _ = v.Args[1] 44294 v_0 := v.Args[0] 44295 if v_0.Op != OpAMD64SETG { 44296 break 44297 } 44298 cmp := v_0.Args[0] 44299 v_1 := v.Args[1] 44300 if v_1.Op != OpAMD64SETG { 44301 break 44302 } 44303 if cmp != v_1.Args[0] { 44304 break 44305 } 44306 b.Kind = BlockAMD64GT 44307 b.SetControl(cmp) 44308 return true 44309 } 44310 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 44311 // cond: 44312 // result: (GE cmp yes no) 44313 for { 44314 v := b.Control 44315 if v.Op != OpAMD64TESTB { 44316 break 44317 } 44318 _ = v.Args[1] 44319 v_0 := v.Args[0] 44320 if v_0.Op != OpAMD64SETGE { 44321 break 44322 } 44323 cmp := v_0.Args[0] 44324 v_1 := v.Args[1] 44325 if v_1.Op != OpAMD64SETGE { 44326 break 44327 } 44328 if cmp != v_1.Args[0] { 44329 break 44330 } 44331 b.Kind = BlockAMD64GE 44332 b.SetControl(cmp) 44333 return true 44334 } 44335 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 44336 // cond: 44337 // result: (GE cmp yes no) 44338 for { 44339 v := b.Control 44340 if v.Op != OpAMD64TESTB { 44341 break 44342 } 44343 _ = v.Args[1] 44344 v_0 := v.Args[0] 44345 if v_0.Op != OpAMD64SETGE { 44346 break 44347 } 44348 cmp := v_0.Args[0] 44349 v_1 := v.Args[1] 44350 if v_1.Op != OpAMD64SETGE { 44351 break 44352 } 44353 if cmp != v_1.Args[0] { 44354 break 44355 } 44356 b.Kind = BlockAMD64GE 44357 b.SetControl(cmp) 44358 return true 44359 } 44360 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 44361 // cond: 44362 // result: (EQ cmp yes no) 44363 for { 44364 v := b.Control 44365 if v.Op != OpAMD64TESTB { 44366 break 44367 } 44368 _ = v.Args[1] 44369 v_0 := v.Args[0] 44370 if v_0.Op != OpAMD64SETEQ { 44371 break 44372 } 44373 cmp := v_0.Args[0] 44374 v_1 := v.Args[1] 44375 if v_1.Op != OpAMD64SETEQ { 44376 break 44377 } 44378 if cmp != v_1.Args[0] { 44379 break 44380 } 44381 b.Kind = BlockAMD64EQ 44382 b.SetControl(cmp) 44383 return true 44384 } 44385 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 44386 // cond: 44387 // result: (EQ cmp yes no) 44388 for { 44389 v := b.Control 44390 if v.Op != OpAMD64TESTB { 44391 break 44392 } 44393 _ = v.Args[1] 44394 v_0 := v.Args[0] 44395 if v_0.Op != OpAMD64SETEQ { 44396 break 44397 } 44398 cmp := v_0.Args[0] 44399 v_1 := 
v.Args[1] 44400 if v_1.Op != OpAMD64SETEQ { 44401 break 44402 } 44403 if cmp != v_1.Args[0] { 44404 break 44405 } 44406 b.Kind = BlockAMD64EQ 44407 b.SetControl(cmp) 44408 return true 44409 } 44410 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 44411 // cond: 44412 // result: (NE cmp yes no) 44413 for { 44414 v := b.Control 44415 if v.Op != OpAMD64TESTB { 44416 break 44417 } 44418 _ = v.Args[1] 44419 v_0 := v.Args[0] 44420 if v_0.Op != OpAMD64SETNE { 44421 break 44422 } 44423 cmp := v_0.Args[0] 44424 v_1 := v.Args[1] 44425 if v_1.Op != OpAMD64SETNE { 44426 break 44427 } 44428 if cmp != v_1.Args[0] { 44429 break 44430 } 44431 b.Kind = BlockAMD64NE 44432 b.SetControl(cmp) 44433 return true 44434 } 44435 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 44436 // cond: 44437 // result: (NE cmp yes no) 44438 for { 44439 v := b.Control 44440 if v.Op != OpAMD64TESTB { 44441 break 44442 } 44443 _ = v.Args[1] 44444 v_0 := v.Args[0] 44445 if v_0.Op != OpAMD64SETNE { 44446 break 44447 } 44448 cmp := v_0.Args[0] 44449 v_1 := v.Args[1] 44450 if v_1.Op != OpAMD64SETNE { 44451 break 44452 } 44453 if cmp != v_1.Args[0] { 44454 break 44455 } 44456 b.Kind = BlockAMD64NE 44457 b.SetControl(cmp) 44458 return true 44459 } 44460 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 44461 // cond: 44462 // result: (ULT cmp yes no) 44463 for { 44464 v := b.Control 44465 if v.Op != OpAMD64TESTB { 44466 break 44467 } 44468 _ = v.Args[1] 44469 v_0 := v.Args[0] 44470 if v_0.Op != OpAMD64SETB { 44471 break 44472 } 44473 cmp := v_0.Args[0] 44474 v_1 := v.Args[1] 44475 if v_1.Op != OpAMD64SETB { 44476 break 44477 } 44478 if cmp != v_1.Args[0] { 44479 break 44480 } 44481 b.Kind = BlockAMD64ULT 44482 b.SetControl(cmp) 44483 return true 44484 } 44485 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 44486 // cond: 44487 // result: (ULT cmp yes no) 44488 for { 44489 v := b.Control 44490 if v.Op != OpAMD64TESTB { 44491 break 44492 } 44493 _ = v.Args[1] 44494 v_0 := v.Args[0] 44495 if v_0.Op != OpAMD64SETB { 44496 break 44497 } 44498 cmp := v_0.Args[0] 44499 v_1 := v.Args[1] 44500 if v_1.Op != OpAMD64SETB { 44501 break 44502 } 44503 if cmp != v_1.Args[0] { 44504 break 44505 } 44506 b.Kind = BlockAMD64ULT 44507 b.SetControl(cmp) 44508 return true 44509 } 44510 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 44511 // cond: 44512 // result: (ULE cmp yes no) 44513 for { 44514 v := b.Control 44515 if v.Op != OpAMD64TESTB { 44516 break 44517 } 44518 _ = v.Args[1] 44519 v_0 := v.Args[0] 44520 if v_0.Op != OpAMD64SETBE { 44521 break 44522 } 44523 cmp := v_0.Args[0] 44524 v_1 := v.Args[1] 44525 if v_1.Op != OpAMD64SETBE { 44526 break 44527 } 44528 if cmp != v_1.Args[0] { 44529 break 44530 } 44531 b.Kind = BlockAMD64ULE 44532 b.SetControl(cmp) 44533 return true 44534 } 44535 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 44536 // cond: 44537 // result: (ULE cmp yes no) 44538 for { 44539 v := b.Control 44540 if v.Op != OpAMD64TESTB { 44541 break 44542 } 44543 _ = v.Args[1] 44544 v_0 := v.Args[0] 44545 if v_0.Op != OpAMD64SETBE { 44546 break 44547 } 44548 cmp := v_0.Args[0] 44549 v_1 := v.Args[1] 44550 if v_1.Op != OpAMD64SETBE { 44551 break 44552 } 44553 if cmp != v_1.Args[0] { 44554 break 44555 } 44556 b.Kind = BlockAMD64ULE 44557 b.SetControl(cmp) 44558 return true 44559 } 44560 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 44561 // cond: 44562 // result: (UGT cmp yes no) 44563 for { 44564 v := b.Control 44565 if v.Op != OpAMD64TESTB { 44566 break 44567 } 44568 _ = v.Args[1] 44569 v_0 := v.Args[0] 44570 
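// Both TESTB operands must be the same SETA value for this rule to
// fire. Note that every (NE (TESTB (SETx cmp) (SETx cmp))) rule in this
// switch appears twice verbatim: TESTB is commutative, so the rule
// generator emits one copy per argument order, and with identical
// operands the two copies coincide.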
if v_0.Op != OpAMD64SETA { 44571 break 44572 } 44573 cmp := v_0.Args[0] 44574 v_1 := v.Args[1] 44575 if v_1.Op != OpAMD64SETA { 44576 break 44577 } 44578 if cmp != v_1.Args[0] { 44579 break 44580 } 44581 b.Kind = BlockAMD64UGT 44582 b.SetControl(cmp) 44583 return true 44584 } 44585 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 44586 // cond: 44587 // result: (UGT cmp yes no) 44588 for { 44589 v := b.Control 44590 if v.Op != OpAMD64TESTB { 44591 break 44592 } 44593 _ = v.Args[1] 44594 v_0 := v.Args[0] 44595 if v_0.Op != OpAMD64SETA { 44596 break 44597 } 44598 cmp := v_0.Args[0] 44599 v_1 := v.Args[1] 44600 if v_1.Op != OpAMD64SETA { 44601 break 44602 } 44603 if cmp != v_1.Args[0] { 44604 break 44605 } 44606 b.Kind = BlockAMD64UGT 44607 b.SetControl(cmp) 44608 return true 44609 } 44610 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 44611 // cond: 44612 // result: (UGE cmp yes no) 44613 for { 44614 v := b.Control 44615 if v.Op != OpAMD64TESTB { 44616 break 44617 } 44618 _ = v.Args[1] 44619 v_0 := v.Args[0] 44620 if v_0.Op != OpAMD64SETAE { 44621 break 44622 } 44623 cmp := v_0.Args[0] 44624 v_1 := v.Args[1] 44625 if v_1.Op != OpAMD64SETAE { 44626 break 44627 } 44628 if cmp != v_1.Args[0] { 44629 break 44630 } 44631 b.Kind = BlockAMD64UGE 44632 b.SetControl(cmp) 44633 return true 44634 } 44635 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 44636 // cond: 44637 // result: (UGE cmp yes no) 44638 for { 44639 v := b.Control 44640 if v.Op != OpAMD64TESTB { 44641 break 44642 } 44643 _ = v.Args[1] 44644 v_0 := v.Args[0] 44645 if v_0.Op != OpAMD64SETAE { 44646 break 44647 } 44648 cmp := v_0.Args[0] 44649 v_1 := v.Args[1] 44650 if v_1.Op != OpAMD64SETAE { 44651 break 44652 } 44653 if cmp != v_1.Args[0] { 44654 break 44655 } 44656 b.Kind = BlockAMD64UGE 44657 b.SetControl(cmp) 44658 return true 44659 } 44660 // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) 44661 // cond: !config.nacl 44662 // result: (ULT (BTL x y)) 44663 for { 44664 v := b.Control 44665 if v.Op != OpAMD64TESTL { 44666 break 44667 } 44668 _ = v.Args[1] 44669 v_0 := v.Args[0] 44670 if v_0.Op != OpAMD64SHLL { 44671 break 44672 } 44673 _ = v_0.Args[1] 44674 v_0_0 := v_0.Args[0] 44675 if v_0_0.Op != OpAMD64MOVLconst { 44676 break 44677 } 44678 if v_0_0.AuxInt != 1 { 44679 break 44680 } 44681 x := v_0.Args[1] 44682 y := v.Args[1] 44683 if !(!config.nacl) { 44684 break 44685 } 44686 b.Kind = BlockAMD64ULT 44687 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 44688 v0.AddArg(x) 44689 v0.AddArg(y) 44690 b.SetControl(v0) 44691 return true 44692 } 44693 // match: (NE (TESTL y (SHLL (MOVLconst [1]) x))) 44694 // cond: !config.nacl 44695 // result: (ULT (BTL x y)) 44696 for { 44697 v := b.Control 44698 if v.Op != OpAMD64TESTL { 44699 break 44700 } 44701 _ = v.Args[1] 44702 y := v.Args[0] 44703 v_1 := v.Args[1] 44704 if v_1.Op != OpAMD64SHLL { 44705 break 44706 } 44707 _ = v_1.Args[1] 44708 v_1_0 := v_1.Args[0] 44709 if v_1_0.Op != OpAMD64MOVLconst { 44710 break 44711 } 44712 if v_1_0.AuxInt != 1 { 44713 break 44714 } 44715 x := v_1.Args[1] 44716 if !(!config.nacl) { 44717 break 44718 } 44719 b.Kind = BlockAMD64ULT 44720 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 44721 v0.AddArg(x) 44722 v0.AddArg(y) 44723 b.SetControl(v0) 44724 return true 44725 } 44726 // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 44727 // cond: !config.nacl 44728 // result: (ULT (BTQ x y)) 44729 for { 44730 v := b.Control 44731 if v.Op != OpAMD64TESTQ { 44732 break 44733 } 44734 _ = v.Args[1] 44735 v_0 := v.Args[0] 44736 if v_0.Op != 
OpAMD64SHLQ { 44737 break 44738 } 44739 _ = v_0.Args[1] 44740 v_0_0 := v_0.Args[0] 44741 if v_0_0.Op != OpAMD64MOVQconst { 44742 break 44743 } 44744 if v_0_0.AuxInt != 1 { 44745 break 44746 } 44747 x := v_0.Args[1] 44748 y := v.Args[1] 44749 if !(!config.nacl) { 44750 break 44751 } 44752 b.Kind = BlockAMD64ULT 44753 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 44754 v0.AddArg(x) 44755 v0.AddArg(y) 44756 b.SetControl(v0) 44757 return true 44758 } 44759 // match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) 44760 // cond: !config.nacl 44761 // result: (ULT (BTQ x y)) 44762 for { 44763 v := b.Control 44764 if v.Op != OpAMD64TESTQ { 44765 break 44766 } 44767 _ = v.Args[1] 44768 y := v.Args[0] 44769 v_1 := v.Args[1] 44770 if v_1.Op != OpAMD64SHLQ { 44771 break 44772 } 44773 _ = v_1.Args[1] 44774 v_1_0 := v_1.Args[0] 44775 if v_1_0.Op != OpAMD64MOVQconst { 44776 break 44777 } 44778 if v_1_0.AuxInt != 1 { 44779 break 44780 } 44781 x := v_1.Args[1] 44782 if !(!config.nacl) { 44783 break 44784 } 44785 b.Kind = BlockAMD64ULT 44786 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 44787 v0.AddArg(x) 44788 v0.AddArg(y) 44789 b.SetControl(v0) 44790 return true 44791 } 44792 // match: (NE (TESTLconst [c] x)) 44793 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 44794 // result: (ULT (BTLconst [log2(c)] x)) 44795 for { 44796 v := b.Control 44797 if v.Op != OpAMD64TESTLconst { 44798 break 44799 } 44800 c := v.AuxInt 44801 x := v.Args[0] 44802 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 44803 break 44804 } 44805 b.Kind = BlockAMD64ULT 44806 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 44807 v0.AuxInt = log2(c) 44808 v0.AddArg(x) 44809 b.SetControl(v0) 44810 return true 44811 } 44812 // match: (NE (TESTQconst [c] x)) 44813 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44814 // result: (ULT (BTQconst [log2(c)] x)) 44815 for { 44816 v := b.Control 44817 if v.Op != OpAMD64TESTQconst { 44818 break 44819 } 44820 c := v.AuxInt 44821 x := v.Args[0] 44822 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44823 break 44824 } 44825 b.Kind = BlockAMD64ULT 44826 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44827 v0.AuxInt = log2(c) 44828 v0.AddArg(x) 44829 b.SetControl(v0) 44830 return true 44831 } 44832 // match: (NE (TESTQ (MOVQconst [c]) x)) 44833 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44834 // result: (ULT (BTQconst [log2(c)] x)) 44835 for { 44836 v := b.Control 44837 if v.Op != OpAMD64TESTQ { 44838 break 44839 } 44840 _ = v.Args[1] 44841 v_0 := v.Args[0] 44842 if v_0.Op != OpAMD64MOVQconst { 44843 break 44844 } 44845 c := v_0.AuxInt 44846 x := v.Args[1] 44847 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44848 break 44849 } 44850 b.Kind = BlockAMD64ULT 44851 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44852 v0.AuxInt = log2(c) 44853 v0.AddArg(x) 44854 b.SetControl(v0) 44855 return true 44856 } 44857 // match: (NE (TESTQ x (MOVQconst [c]))) 44858 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44859 // result: (ULT (BTQconst [log2(c)] x)) 44860 for { 44861 v := b.Control 44862 if v.Op != OpAMD64TESTQ { 44863 break 44864 } 44865 _ = v.Args[1] 44866 x := v.Args[0] 44867 v_1 := v.Args[1] 44868 if v_1.Op != OpAMD64MOVQconst { 44869 break 44870 } 44871 c := v_1.AuxInt 44872 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44873 break 44874 } 44875 b.Kind = BlockAMD64ULT 44876 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44877 v0.AuxInt = log2(c) 44878 v0.AddArg(x) 44879 
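// BTQconst copies bit log2(c) of x into the carry flag, so the
// single-bit test "x & (1<<k) != 0" branches on ULT (carry set), while
// the corresponding EQ rules earlier use UGE (carry clear). Roughly (an
// illustrative sketch, not generated code):
//
//	if x&(1<<k) != 0 { ... } // TESTQ $(1<<k), x  ->  BTQ $k, x; JB (= ULT)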
b.SetControl(v0) 44880 return true 44881 } 44882 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 44883 // cond: 44884 // result: (UGT cmp yes no) 44885 for { 44886 v := b.Control 44887 if v.Op != OpAMD64TESTB { 44888 break 44889 } 44890 _ = v.Args[1] 44891 v_0 := v.Args[0] 44892 if v_0.Op != OpAMD64SETGF { 44893 break 44894 } 44895 cmp := v_0.Args[0] 44896 v_1 := v.Args[1] 44897 if v_1.Op != OpAMD64SETGF { 44898 break 44899 } 44900 if cmp != v_1.Args[0] { 44901 break 44902 } 44903 b.Kind = BlockAMD64UGT 44904 b.SetControl(cmp) 44905 return true 44906 } 44907 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 44908 // cond: 44909 // result: (UGT cmp yes no) 44910 for { 44911 v := b.Control 44912 if v.Op != OpAMD64TESTB { 44913 break 44914 } 44915 _ = v.Args[1] 44916 v_0 := v.Args[0] 44917 if v_0.Op != OpAMD64SETGF { 44918 break 44919 } 44920 cmp := v_0.Args[0] 44921 v_1 := v.Args[1] 44922 if v_1.Op != OpAMD64SETGF { 44923 break 44924 } 44925 if cmp != v_1.Args[0] { 44926 break 44927 } 44928 b.Kind = BlockAMD64UGT 44929 b.SetControl(cmp) 44930 return true 44931 } 44932 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 44933 // cond: 44934 // result: (UGE cmp yes no) 44935 for { 44936 v := b.Control 44937 if v.Op != OpAMD64TESTB { 44938 break 44939 } 44940 _ = v.Args[1] 44941 v_0 := v.Args[0] 44942 if v_0.Op != OpAMD64SETGEF { 44943 break 44944 } 44945 cmp := v_0.Args[0] 44946 v_1 := v.Args[1] 44947 if v_1.Op != OpAMD64SETGEF { 44948 break 44949 } 44950 if cmp != v_1.Args[0] { 44951 break 44952 } 44953 b.Kind = BlockAMD64UGE 44954 b.SetControl(cmp) 44955 return true 44956 } 44957 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 44958 // cond: 44959 // result: (UGE cmp yes no) 44960 for { 44961 v := b.Control 44962 if v.Op != OpAMD64TESTB { 44963 break 44964 } 44965 _ = v.Args[1] 44966 v_0 := v.Args[0] 44967 if v_0.Op != OpAMD64SETGEF { 44968 break 44969 } 44970 cmp := v_0.Args[0] 44971 v_1 := v.Args[1] 44972 if v_1.Op != OpAMD64SETGEF { 44973 break 44974 } 44975 if cmp != v_1.Args[0] { 44976 break 44977 } 44978 b.Kind = BlockAMD64UGE 44979 b.SetControl(cmp) 44980 return true 44981 } 44982 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 44983 // cond: 44984 // result: (EQF cmp yes no) 44985 for { 44986 v := b.Control 44987 if v.Op != OpAMD64TESTB { 44988 break 44989 } 44990 _ = v.Args[1] 44991 v_0 := v.Args[0] 44992 if v_0.Op != OpAMD64SETEQF { 44993 break 44994 } 44995 cmp := v_0.Args[0] 44996 v_1 := v.Args[1] 44997 if v_1.Op != OpAMD64SETEQF { 44998 break 44999 } 45000 if cmp != v_1.Args[0] { 45001 break 45002 } 45003 b.Kind = BlockAMD64EQF 45004 b.SetControl(cmp) 45005 return true 45006 } 45007 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 45008 // cond: 45009 // result: (EQF cmp yes no) 45010 for { 45011 v := b.Control 45012 if v.Op != OpAMD64TESTB { 45013 break 45014 } 45015 _ = v.Args[1] 45016 v_0 := v.Args[0] 45017 if v_0.Op != OpAMD64SETEQF { 45018 break 45019 } 45020 cmp := v_0.Args[0] 45021 v_1 := v.Args[1] 45022 if v_1.Op != OpAMD64SETEQF { 45023 break 45024 } 45025 if cmp != v_1.Args[0] { 45026 break 45027 } 45028 b.Kind = BlockAMD64EQF 45029 b.SetControl(cmp) 45030 return true 45031 } 45032 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 45033 // cond: 45034 // result: (NEF cmp yes no) 45035 for { 45036 v := b.Control 45037 if v.Op != OpAMD64TESTB { 45038 break 45039 } 45040 _ = v.Args[1] 45041 v_0 := v.Args[0] 45042 if v_0.Op != OpAMD64SETNEF { 45043 break 45044 } 45045 cmp := v_0.Args[0] 45046 v_1 := v.Args[1] 45047 if v_1.Op != 
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			return true
		}
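		// FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, and FlagGT_UGT stand for
		// comparison results that are known at compile time (the suffixes give
		// the signed and unsigned views of the same comparison). A conditional
		// block whose control is such a constant is decided here: it becomes a
		// BlockFirst with nil control, which always takes its first successor,
		// and swapSuccessors is applied when the branch is known not taken.
		// For NE, only FlagEQ means "not taken", so only that rule swaps.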
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
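		// The ULE and ULT cases below mirror UGE and UGT above: InvertFlags
		// turns each kind into its operand-swapped counterpart, and a constant
		// flag state resolves the branch at compile time, swapping successors
		// whenever the unsigned view of the result contradicts the block kind.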
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
	}
	return false
}
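// Note: these block rewrites run as part of the SSA rewrite pass together
// with the value rewrites above; the pass re-applies them until a full sweep
// changes nothing, so each rule only needs to perform a single step of a
// longer simplification.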