github.com/slayercat/go@v0.0.0-20170428012452-c51559813f61/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
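// Sketch of why the masking rules in ANDLconst and ANDQconst above are sound
// (illustrative only, not generated): AND with 0xFF, 0xFFFF, or 0xFFFFFFFF
// is exactly a zero-extension of the low 8, 16, or 32 bits, which MOVBQZX,
// MOVWQZX, and MOVLQZX implement in a single instruction. The hypothetical
// helper below always returns true.
func exampleMaskIsZeroExtension(x uint64) bool {
	return x&0xFF == uint64(uint8(x)) &&
		x&0xFFFF == uint64(uint16(x)) &&
		x&0xFFFFFFFF == uint64(uint32(x))
}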
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
	// match: (BTQconst [c] x)
	// cond: c < 32
	// result: (BTLconst [c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
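// The BSFQ rules above drop a zero-extension under an ORQconst sentinel:
// the OR forces bit 8 (resp. bit 16) to one, so BSFQ always finds a set bit
// at or below that position, and the high bits that MOVBQZX/MOVWQZX would
// have cleared can never influence the result. The CMOVQEQ rule rests on
// the same invariant: with c != 0 the ORQconst operand is nonzero, BSFQ
// never sets the zero flag, and the "equal" arm of the conditional move is
// dead, leaving just x.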
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
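// Sketch (illustrative; the helper is hypothetical): the trailing rules
// above turn comparisons against zero into TEST instructions. TEST computes
// an AND and sets flags without writing a register, and x&x == x, so
// TESTB x x sets flags exactly as CMPB x $0 would while using a shorter
// encoding with no immediate.
func exampleTestSelfEqualsCompareZero(x int8) bool {
	return x&x == x // why TESTB x x and CMPBconst x [0] agree on flags
}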
func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
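// The five MOVLconst cases above fold a compare of two constants straight to
// a flags constant; separate signed and unsigned outcomes are needed because
// one comparison feeds both kinds of branches. For example, comparing
// x = -1 with y = 1: int32(-1) < 1, but uint32(x) = 0xFFFFFFFF > 1, so the
// result is FlagLT_UGT rather than a single "less than".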
func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPW x (MOVLconst [c]))
	// cond:
	// result: (CMPWconst x [int64(int16(c))])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v0.AuxInt = int64(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
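// The CMPXCHGlock rules above (like the ADDQconst-folding rules for plain
// loads and stores later in this file) fold a pointer adjustment into the
// instruction's displacement. AMD64 addressing modes carry a signed 32-bit
// displacement, which is why each such fold is guarded by
// is32Bit(off1+off2) rather than applied unconditionally.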
func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
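// Sketch (illustrative; the helper is hypothetical): the SHLQconst rules
// above reclassify a shifted index as a scaled LEA, since x + (y<<k)
// equals x + (1<<k)*y and the hardware addressing modes support scale
// factors 1, 2, 4, and 8. The helper always returns true.
func exampleShiftedIndexIsScaledIndex(x, y uintptr) bool {
	return x+(y<<1) == x+2*y && x+(y<<2) == x+4*y && x+(y<<3) == x+8*y
}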
func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
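// When a constant is folded out of the *index* argument of a scaled LEA, it
// must be scaled into the displacement: for LEAQ2, c + x + 2*(y+d) equals
// (c+2*d) + x + 2*y, hence the AuxInt of c+2*d above. The LEAQ4 and LEAQ8
// rules below scale by 4*d and 8*d in the same way.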
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
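// Sketch of the (MOVBQSX (ANDLconst [c] x)) rule above (illustrative; the
// helper is hypothetical): when c's bit 7 is clear, the masked value has a
// zero sign bit in its low byte, so sign-extending it is the same as
// masking with c&0x7f, and no extension instruction is needed at all.
func exampleSignExtendAfterMask(x, c uint32) bool {
	if c&0x80 != 0 {
		return true // rule does not apply; nothing to check
	}
	return int64(int8(x&c)) == int64(x&(c&0x7f)) // sign-extension == mask
}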
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
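// Sketch (illustrative; the helper is hypothetical): the first MOVBload
// rule above is store-to-load forwarding at the SSA level — a byte load
// whose memory argument is a byte store to the same address, offset, and
// symbol simply produces the value that was stored, and the load vanishes.
func exampleStoreToLoadForwarding(p *byte, x byte) byte {
	*p = x    // the MOVBstore
	return *p // a MOVBload of the same location: rewritten to just x
}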
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(int64(int8(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		w := v.Args[1]
		x0 := v.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst {
			break
		}
		if x0_1.AuxInt != 8 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
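	// The rule above and the two that follow merge chains of single-byte
	// stores of successively shifted pieces of w into one wider store.
	// Storing the high byte at i-1 and the low byte at i lays the value
	// down byte-reversed, so on little-endian AMD64 the two-byte case
	// becomes a MOVWstore of the byte-rotated value (ROLWconst [8]), and
	// the four- and eight-byte chains become MOVLstore/MOVQstore of
	// BSWAPL/BSWAPQ.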
	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		w := v.Args[1]
		x2 := v.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst {
			break
		}
		if x2_1.AuxInt != 8 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst {
			break
		}
		if x1_1.AuxInt != 16 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst {
			break
		}
		if x0_1.AuxInt != 24 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		w := v.Args[1]
		x6 := v.Args[2]
		if x6.Op != OpAMD64MOVBstore {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		if p != x6.Args[0] {
			break
		}
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst {
			break
		}
		if x6_1.AuxInt != 8 {
			break
		}
		if w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		if p != x5.Args[0] {
			break
		}
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst {
			break
		}
		if x5_1.AuxInt != 16 {
			break
		}
		if w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		if p != x4.Args[0] {
			break
		}
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst {
			break
		}
		if x4_1.AuxInt != 24 {
			break
		}
		if w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst {
			break
		}
		if x3_1.AuxInt != 32 {
			break
		}
		if w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst {
			break
		}
		if x2_1.AuxInt != 40 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst {
			break
		}
		if x1_1.AuxInt != 48 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst {
			break
		}
		if x0_1.AuxInt != 56 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
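// rewriteValueAMD64_OpAMD64MOVBstore_10 continues the MOVBstore rule list;
// the generator appears to split each op's rules into chunks named _0, _10,
// and so on, tried in order. The first two rules below are the
// little-endian counterparts of the byte-merge rules above: the low byte
// already sits at the lower address, so a pair of byte stores collapses
// into one MOVWstore with no byte swap, e.g.
//
//	b[0] = byte(w)      // the inner MOVBstore [i-1] {s} p w
//	b[1] = byte(w >> 8) // merged into (MOVWstore [i-1] {s} p w mem)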
func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 8 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
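	// A store-constant AuxInt packs both the value and the offset into a
	// single ValAndOff. The next rule (and its idx1 twin in
	// rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0 below) fuses two
	// byte-constant stores at adjacent offsets into one MOVWstoreconst,
	// packing the bytes little-endian. Sketch with hypothetical values:
	//
	//	a = makeValAndOff(0x11, 40) // earlier store: byte 0x11 at offset 40
	//	c = makeValAndOff(0x22, 41) // this store:    byte 0x22 at offset 41
	//	// merged: 0x11&0xff | 0x22<<8 == 0x2211, stored at offset 40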
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x0 := v.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRWconst {
			break
		}
		if x0_2.AuxInt != 8 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x2 := v.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRLconst {
			break
		}
		if x2_2.AuxInt != 8 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRLconst {
			break
		}
		if x1_2.AuxInt != 16 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRLconst {
			break
		}
		if x0_2.AuxInt != 24 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x6 := v.Args[3]
		if x6.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		x6_2 := x6.Args[2]
		if x6_2.Op != OpAMD64SHRQconst {
			break
		}
		if x6_2.AuxInt != 8 {
			break
		}
		if w != x6_2.Args[0] {
			break
		}
		x5 := x6.Args[3]
		if x5.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		x5_2 := x5.Args[2]
		if x5_2.Op != OpAMD64SHRQconst {
			break
		}
		if x5_2.AuxInt != 16 {
			break
		}
		if w != x5_2.Args[0] {
			break
		}
		x4 := x5.Args[3]
		if x4.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		x4_2 := x4.Args[2]
		if x4_2.Op != OpAMD64SHRQconst {
			break
		}
		if x4_2.AuxInt != 24 {
			break
		}
		if w != x4_2.Args[0] {
			break
		}
		x3 := x4.Args[3]
		if x3.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		x3_2 := x3.Args[2]
		if x3_2.Op != OpAMD64SHRQconst {
			break
		}
		if x3_2.AuxInt != 32 {
			break
		}
		if w != x3_2.Args[0] {
			break
		}
		x2 := x3.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRQconst {
			break
		}
		if x2_2.AuxInt != 40 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRQconst {
			break
		}
		if x1_2.AuxInt != 48 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRQconst {
			break
		}
		if x0_2.AuxInt != 56 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 8 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: c & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fffffff
		v.AddArg(x)
		return true
	}
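	// MOVLQSX of a value that is already sign-extended from an equal or
	// narrower width is redundant, so the next three rules replace the
	// outer extension with a plain copy. For example, in
	//
	//	var x int16
	//	y := int64(int32(x)) // inner MOVWQSX already fills bits 16-63
	//
	// the outer MOVLQSX adds nothing and is dropped.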
	// match: (MOVLQSX x:(MOVLQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX x:(MOVWQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool {
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
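// The load-folding rules in rewriteValueAMD64_OpAMD64MOVLQZX_0 below (and
// in MOVLQSX_0 above) use the "@x.Block" result form: the replacement load
// is constructed in the block of the original load x rather than in v's
// block, and v itself is reset to an OpCopy of the new value. The load thus
// stays at its original program point while the extension disappears; on
// AMD64 a 32-bit load already zeroes bits 32-63, so MOVLQZX of a MOVLload
// is free once the load's only use is the extension (x.Uses == 1).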
func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx4 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVLQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVWQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
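// The first rule of rewriteValueAMD64_OpAMD64MOVLload_0 below is
// store-to-load forwarding: a load that reads exactly the bytes just
// written by a store to the same address (same symbol, offset, and
// pointer) is replaced by the stored value and never touches memory. The
// remaining rules fold ADDQconst/ADDLconst and LEAQ/LEAL address
// computations into the load; the constant-offset folds are guarded by
// is32Bit because an AMD64 addressing-mode displacement is a signed
// 32-bit immediate.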
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
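// MOVLloadidx1 is the unscaled two-register addressing form. The rules in
// rewriteValueAMD64_OpAMD64MOVLloadidx1_0 below strength-reduce it: an
// index of the form (SHLQconst [2] idx) turns the load into the 4-scaled
// MOVLloadidx4 form, and an ADDQconst on either operand folds into the
// displacement. Since ptr+idx is commutative, each pattern appears twice
// with the operands swapped.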
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
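// In the scaled MOVLloadidx4 form the index is multiplied by 4, so a
// constant folded out of the pointer operand adds d to the displacement
// while one folded out of the index operand adds 4*d:
//
//	ptr + 4*(idx+d) + c  ==  ptr + 4*idx + (c + 4*d)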
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
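	// The next rule merges two 4-byte constant stores at adjacent offsets
	// into one 8-byte store. The 64-bit value is assembled from the two
	// 32-bit halves (low half from the lower offset); since x86-64 has no
	// store of a full 64-bit immediate to memory, the combined constant
	// is first materialized into a register with MOVQconst.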
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
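// rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0 below mirrors the idx1
// rules with the scale applied to index constants (ValAndOff(x).add(4*c)).
// Its pair-merge rule seems to have no 4-scaled MOVQstore form to target,
// so it falls back to the unscaled MOVQstoreidx1 and materializes the
// scaling explicitly as (SHLQconst <i.Type> [2] i) on the index.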
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(4 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx4 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 2
		v0.AddArg(i)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool {
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool {
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool {
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
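// Editor's note: the is32Bit guards used throughout these offset-folding
// rules exist because an x86-64 addressing mode carries only a signed 32-bit
// displacement, so two offsets may be combined only if their sum still fits.
// A minimal sketch of such a check (illustrative helper, not the package's
// own is32Bit):
func exampleFitsInSigned32(n int64) bool {
	// n survives a round-trip through int32 exactly when it is representable
	// as a signed 32-bit displacement.
	return n == int64(int32(n))
}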
func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
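// Editor's note: the storeconst ops below carry both a constant value and an
// offset in a single AuxInt via ValAndOff, which is why these rules go
// through makeValAndOff/canAdd/add rather than plain integer addition. A
// hedged sketch of the packing (illustrative helper, not the package's own
// ValAndOff implementation):
func exampleValAndOffPack(val, off int32) int64 {
	// value in the high 32 bits, offset in the low 32 bits
	return int64(val)<<32 | int64(uint32(off))
}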
func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool {
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
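// Editor's note: in the idx8 rules above, a constant added to the index is
// scaled before being folded into the displacement (c+8*d), while a constant
// added to the base pointer is not (c+d), because the effective address is
// ptr + 8*idx + disp. A tiny identity check showing why (illustrative only,
// ignoring overflow):
func exampleScaledIdxFold(c, d, ptr, idx int64) bool {
	// address before rewrite == address after rewrite
	return ptr+8*(idx+d)+c == ptr+8*idx+(c+8*d)
}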
func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool {
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool {
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool {
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
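// Editor's note: the SHLQconst [2] rule above is what turns a manually
// scaled index into the *4 addressing mode. Illustrative Go source that can
// give rise to that shape (hypothetical example, not from this file):
func exampleFloat32Index(s []float32, i int) float32 {
	// s[i] computes &s[0] + 4*i; the shift-by-2 that materializes the *4 is
	// absorbed into a MOVSSloadidx4 by the rewrite.
	return s[i]
}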
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool {
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
break 10086 } 10087 off := x.AuxInt 10088 sym := x.Aux 10089 ptr := x.Args[0] 10090 mem := x.Args[1] 10091 if !(x.Uses == 1 && clobber(x)) { 10092 break 10093 } 10094 b = x.Block 10095 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 10096 v.reset(OpCopy) 10097 v.AddArg(v0) 10098 v0.AuxInt = off 10099 v0.Aux = sym 10100 v0.AddArg(ptr) 10101 v0.AddArg(mem) 10102 return true 10103 } 10104 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 10105 // cond: x.Uses == 1 && clobber(x) 10106 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 10107 for { 10108 x := v.Args[0] 10109 if x.Op != OpAMD64MOVLload { 10110 break 10111 } 10112 off := x.AuxInt 10113 sym := x.Aux 10114 ptr := x.Args[0] 10115 mem := x.Args[1] 10116 if !(x.Uses == 1 && clobber(x)) { 10117 break 10118 } 10119 b = x.Block 10120 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 10121 v.reset(OpCopy) 10122 v.AddArg(v0) 10123 v0.AuxInt = off 10124 v0.Aux = sym 10125 v0.AddArg(ptr) 10126 v0.AddArg(mem) 10127 return true 10128 } 10129 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 10130 // cond: x.Uses == 1 && clobber(x) 10131 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 10132 for { 10133 x := v.Args[0] 10134 if x.Op != OpAMD64MOVQload { 10135 break 10136 } 10137 off := x.AuxInt 10138 sym := x.Aux 10139 ptr := x.Args[0] 10140 mem := x.Args[1] 10141 if !(x.Uses == 1 && clobber(x)) { 10142 break 10143 } 10144 b = x.Block 10145 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 10146 v.reset(OpCopy) 10147 v.AddArg(v0) 10148 v0.AuxInt = off 10149 v0.Aux = sym 10150 v0.AddArg(ptr) 10151 v0.AddArg(mem) 10152 return true 10153 } 10154 // match: (MOVWQSX (ANDLconst [c] x)) 10155 // cond: c & 0x8000 == 0 10156 // result: (ANDLconst [c & 0x7fff] x) 10157 for { 10158 v_0 := v.Args[0] 10159 if v_0.Op != OpAMD64ANDLconst { 10160 break 10161 } 10162 c := v_0.AuxInt 10163 x := v_0.Args[0] 10164 if !(c&0x8000 == 0) { 10165 break 10166 } 10167 v.reset(OpAMD64ANDLconst) 10168 v.AuxInt = c & 0x7fff 10169 v.AddArg(x) 10170 return true 10171 } 10172 // match: (MOVWQSX x:(MOVWQSX _)) 10173 // cond: 10174 // result: x 10175 for { 10176 x := v.Args[0] 10177 if x.Op != OpAMD64MOVWQSX { 10178 break 10179 } 10180 v.reset(OpCopy) 10181 v.Type = x.Type 10182 v.AddArg(x) 10183 return true 10184 } 10185 // match: (MOVWQSX x:(MOVBQSX _)) 10186 // cond: 10187 // result: x 10188 for { 10189 x := v.Args[0] 10190 if x.Op != OpAMD64MOVBQSX { 10191 break 10192 } 10193 v.reset(OpCopy) 10194 v.Type = x.Type 10195 v.AddArg(x) 10196 return true 10197 } 10198 return false 10199 } 10200 func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { 10201 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10202 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10203 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10204 for { 10205 off1 := v.AuxInt 10206 sym1 := v.Aux 10207 v_0 := v.Args[0] 10208 if v_0.Op != OpAMD64LEAQ { 10209 break 10210 } 10211 off2 := v_0.AuxInt 10212 sym2 := v_0.Aux 10213 base := v_0.Args[0] 10214 mem := v.Args[1] 10215 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10216 break 10217 } 10218 v.reset(OpAMD64MOVWQSXload) 10219 v.AuxInt = off1 + off2 10220 v.Aux = mergeSym(sym1, sym2) 10221 v.AddArg(base) 10222 v.AddArg(mem) 10223 return true 10224 } 10225 return false 10226 } 10227 func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { 10228 b := v.Block 10229 _ = b 10230 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 10231 // cond: x.Uses == 1 && clobber(x) 
10232 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10233 for { 10234 x := v.Args[0] 10235 if x.Op != OpAMD64MOVWload { 10236 break 10237 } 10238 off := x.AuxInt 10239 sym := x.Aux 10240 ptr := x.Args[0] 10241 mem := x.Args[1] 10242 if !(x.Uses == 1 && clobber(x)) { 10243 break 10244 } 10245 b = x.Block 10246 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10247 v.reset(OpCopy) 10248 v.AddArg(v0) 10249 v0.AuxInt = off 10250 v0.Aux = sym 10251 v0.AddArg(ptr) 10252 v0.AddArg(mem) 10253 return true 10254 } 10255 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 10256 // cond: x.Uses == 1 && clobber(x) 10257 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10258 for { 10259 x := v.Args[0] 10260 if x.Op != OpAMD64MOVLload { 10261 break 10262 } 10263 off := x.AuxInt 10264 sym := x.Aux 10265 ptr := x.Args[0] 10266 mem := x.Args[1] 10267 if !(x.Uses == 1 && clobber(x)) { 10268 break 10269 } 10270 b = x.Block 10271 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10272 v.reset(OpCopy) 10273 v.AddArg(v0) 10274 v0.AuxInt = off 10275 v0.Aux = sym 10276 v0.AddArg(ptr) 10277 v0.AddArg(mem) 10278 return true 10279 } 10280 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 10281 // cond: x.Uses == 1 && clobber(x) 10282 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 10283 for { 10284 x := v.Args[0] 10285 if x.Op != OpAMD64MOVQload { 10286 break 10287 } 10288 off := x.AuxInt 10289 sym := x.Aux 10290 ptr := x.Args[0] 10291 mem := x.Args[1] 10292 if !(x.Uses == 1 && clobber(x)) { 10293 break 10294 } 10295 b = x.Block 10296 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 10297 v.reset(OpCopy) 10298 v.AddArg(v0) 10299 v0.AuxInt = off 10300 v0.Aux = sym 10301 v0.AddArg(ptr) 10302 v0.AddArg(mem) 10303 return true 10304 } 10305 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 10306 // cond: x.Uses == 1 && clobber(x) 10307 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 10308 for { 10309 x := v.Args[0] 10310 if x.Op != OpAMD64MOVWloadidx1 { 10311 break 10312 } 10313 off := x.AuxInt 10314 sym := x.Aux 10315 ptr := x.Args[0] 10316 idx := x.Args[1] 10317 mem := x.Args[2] 10318 if !(x.Uses == 1 && clobber(x)) { 10319 break 10320 } 10321 b = x.Block 10322 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 10323 v.reset(OpCopy) 10324 v.AddArg(v0) 10325 v0.AuxInt = off 10326 v0.Aux = sym 10327 v0.AddArg(ptr) 10328 v0.AddArg(idx) 10329 v0.AddArg(mem) 10330 return true 10331 } 10332 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 10333 // cond: x.Uses == 1 && clobber(x) 10334 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 10335 for { 10336 x := v.Args[0] 10337 if x.Op != OpAMD64MOVWloadidx2 { 10338 break 10339 } 10340 off := x.AuxInt 10341 sym := x.Aux 10342 ptr := x.Args[0] 10343 idx := x.Args[1] 10344 mem := x.Args[2] 10345 if !(x.Uses == 1 && clobber(x)) { 10346 break 10347 } 10348 b = x.Block 10349 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 10350 v.reset(OpCopy) 10351 v.AddArg(v0) 10352 v0.AuxInt = off 10353 v0.Aux = sym 10354 v0.AddArg(ptr) 10355 v0.AddArg(idx) 10356 v0.AddArg(mem) 10357 return true 10358 } 10359 // match: (MOVWQZX (ANDLconst [c] x)) 10360 // cond: 10361 // result: (ANDLconst [c & 0xffff] x) 10362 for { 10363 v_0 := v.Args[0] 10364 if v_0.Op != OpAMD64ANDLconst { 10365 break 10366 } 10367 c := v_0.AuxInt 10368 x := v_0.Args[0] 10369 v.reset(OpAMD64ANDLconst) 10370 v.AuxInt = c & 0xffff 10371 v.AddArg(x) 10372 return true 10373 } 10374 // match: (MOVWQZX x:(MOVWQZX _)) 10375 // 
cond: 10376 // result: x 10377 for { 10378 x := v.Args[0] 10379 if x.Op != OpAMD64MOVWQZX { 10380 break 10381 } 10382 v.reset(OpCopy) 10383 v.Type = x.Type 10384 v.AddArg(x) 10385 return true 10386 } 10387 // match: (MOVWQZX x:(MOVBQZX _)) 10388 // cond: 10389 // result: x 10390 for { 10391 x := v.Args[0] 10392 if x.Op != OpAMD64MOVBQZX { 10393 break 10394 } 10395 v.reset(OpCopy) 10396 v.Type = x.Type 10397 v.AddArg(x) 10398 return true 10399 } 10400 return false 10401 } 10402 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 10403 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 10404 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 10405 // result: x 10406 for { 10407 off := v.AuxInt 10408 sym := v.Aux 10409 ptr := v.Args[0] 10410 v_1 := v.Args[1] 10411 if v_1.Op != OpAMD64MOVWstore { 10412 break 10413 } 10414 off2 := v_1.AuxInt 10415 sym2 := v_1.Aux 10416 ptr2 := v_1.Args[0] 10417 x := v_1.Args[1] 10418 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 10419 break 10420 } 10421 v.reset(OpCopy) 10422 v.Type = x.Type 10423 v.AddArg(x) 10424 return true 10425 } 10426 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 10427 // cond: is32Bit(off1+off2) 10428 // result: (MOVWload [off1+off2] {sym} ptr mem) 10429 for { 10430 off1 := v.AuxInt 10431 sym := v.Aux 10432 v_0 := v.Args[0] 10433 if v_0.Op != OpAMD64ADDQconst { 10434 break 10435 } 10436 off2 := v_0.AuxInt 10437 ptr := v_0.Args[0] 10438 mem := v.Args[1] 10439 if !(is32Bit(off1 + off2)) { 10440 break 10441 } 10442 v.reset(OpAMD64MOVWload) 10443 v.AuxInt = off1 + off2 10444 v.Aux = sym 10445 v.AddArg(ptr) 10446 v.AddArg(mem) 10447 return true 10448 } 10449 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10450 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10451 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10452 for { 10453 off1 := v.AuxInt 10454 sym1 := v.Aux 10455 v_0 := v.Args[0] 10456 if v_0.Op != OpAMD64LEAQ { 10457 break 10458 } 10459 off2 := v_0.AuxInt 10460 sym2 := v_0.Aux 10461 base := v_0.Args[0] 10462 mem := v.Args[1] 10463 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10464 break 10465 } 10466 v.reset(OpAMD64MOVWload) 10467 v.AuxInt = off1 + off2 10468 v.Aux = mergeSym(sym1, sym2) 10469 v.AddArg(base) 10470 v.AddArg(mem) 10471 return true 10472 } 10473 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 10474 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10475 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10476 for { 10477 off1 := v.AuxInt 10478 sym1 := v.Aux 10479 v_0 := v.Args[0] 10480 if v_0.Op != OpAMD64LEAQ1 { 10481 break 10482 } 10483 off2 := v_0.AuxInt 10484 sym2 := v_0.Aux 10485 ptr := v_0.Args[0] 10486 idx := v_0.Args[1] 10487 mem := v.Args[1] 10488 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10489 break 10490 } 10491 v.reset(OpAMD64MOVWloadidx1) 10492 v.AuxInt = off1 + off2 10493 v.Aux = mergeSym(sym1, sym2) 10494 v.AddArg(ptr) 10495 v.AddArg(idx) 10496 v.AddArg(mem) 10497 return true 10498 } 10499 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 10500 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10501 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 10502 for { 10503 off1 := v.AuxInt 10504 sym1 := v.Aux 10505 v_0 := v.Args[0] 10506 if v_0.Op != OpAMD64LEAQ2 { 10507 break 10508 } 10509 off2 := v_0.AuxInt 10510 sym2 := v_0.Aux 10511 ptr := v_0.Args[0] 10512 idx := v_0.Args[1] 10513 mem := 
v.Args[1] 10514 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10515 break 10516 } 10517 v.reset(OpAMD64MOVWloadidx2) 10518 v.AuxInt = off1 + off2 10519 v.Aux = mergeSym(sym1, sym2) 10520 v.AddArg(ptr) 10521 v.AddArg(idx) 10522 v.AddArg(mem) 10523 return true 10524 } 10525 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 10526 // cond: ptr.Op != OpSB 10527 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 10528 for { 10529 off := v.AuxInt 10530 sym := v.Aux 10531 v_0 := v.Args[0] 10532 if v_0.Op != OpAMD64ADDQ { 10533 break 10534 } 10535 ptr := v_0.Args[0] 10536 idx := v_0.Args[1] 10537 mem := v.Args[1] 10538 if !(ptr.Op != OpSB) { 10539 break 10540 } 10541 v.reset(OpAMD64MOVWloadidx1) 10542 v.AuxInt = off 10543 v.Aux = sym 10544 v.AddArg(ptr) 10545 v.AddArg(idx) 10546 v.AddArg(mem) 10547 return true 10548 } 10549 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 10550 // cond: canMergeSym(sym1, sym2) 10551 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10552 for { 10553 off1 := v.AuxInt 10554 sym1 := v.Aux 10555 v_0 := v.Args[0] 10556 if v_0.Op != OpAMD64LEAL { 10557 break 10558 } 10559 off2 := v_0.AuxInt 10560 sym2 := v_0.Aux 10561 base := v_0.Args[0] 10562 mem := v.Args[1] 10563 if !(canMergeSym(sym1, sym2)) { 10564 break 10565 } 10566 v.reset(OpAMD64MOVWload) 10567 v.AuxInt = off1 + off2 10568 v.Aux = mergeSym(sym1, sym2) 10569 v.AddArg(base) 10570 v.AddArg(mem) 10571 return true 10572 } 10573 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 10574 // cond: is32Bit(off1+off2) 10575 // result: (MOVWload [off1+off2] {sym} ptr mem) 10576 for { 10577 off1 := v.AuxInt 10578 sym := v.Aux 10579 v_0 := v.Args[0] 10580 if v_0.Op != OpAMD64ADDLconst { 10581 break 10582 } 10583 off2 := v_0.AuxInt 10584 ptr := v_0.Args[0] 10585 mem := v.Args[1] 10586 if !(is32Bit(off1 + off2)) { 10587 break 10588 } 10589 v.reset(OpAMD64MOVWload) 10590 v.AuxInt = off1 + off2 10591 v.Aux = sym 10592 v.AddArg(ptr) 10593 v.AddArg(mem) 10594 return true 10595 } 10596 return false 10597 } 10598 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 10599 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 10600 // cond: 10601 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 10602 for { 10603 c := v.AuxInt 10604 sym := v.Aux 10605 ptr := v.Args[0] 10606 v_1 := v.Args[1] 10607 if v_1.Op != OpAMD64SHLQconst { 10608 break 10609 } 10610 if v_1.AuxInt != 1 { 10611 break 10612 } 10613 idx := v_1.Args[0] 10614 mem := v.Args[2] 10615 v.reset(OpAMD64MOVWloadidx2) 10616 v.AuxInt = c 10617 v.Aux = sym 10618 v.AddArg(ptr) 10619 v.AddArg(idx) 10620 v.AddArg(mem) 10621 return true 10622 } 10623 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) 10624 // cond: 10625 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 10626 for { 10627 c := v.AuxInt 10628 sym := v.Aux 10629 v_0 := v.Args[0] 10630 if v_0.Op != OpAMD64SHLQconst { 10631 break 10632 } 10633 if v_0.AuxInt != 1 { 10634 break 10635 } 10636 idx := v_0.Args[0] 10637 ptr := v.Args[1] 10638 mem := v.Args[2] 10639 v.reset(OpAMD64MOVWloadidx2) 10640 v.AuxInt = c 10641 v.Aux = sym 10642 v.AddArg(ptr) 10643 v.AddArg(idx) 10644 v.AddArg(mem) 10645 return true 10646 } 10647 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 10648 // cond: 10649 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 10650 for { 10651 c := v.AuxInt 10652 sym := v.Aux 10653 v_0 := v.Args[0] 10654 if v_0.Op != OpAMD64ADDQconst { 10655 break 10656 } 10657 d := v_0.AuxInt 10658 ptr := v_0.Args[0] 10659 idx := v.Args[1] 10660 
mem := v.Args[2] 10661 v.reset(OpAMD64MOVWloadidx1) 10662 v.AuxInt = c + d 10663 v.Aux = sym 10664 v.AddArg(ptr) 10665 v.AddArg(idx) 10666 v.AddArg(mem) 10667 return true 10668 } 10669 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 10670 // cond: 10671 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 10672 for { 10673 c := v.AuxInt 10674 sym := v.Aux 10675 idx := v.Args[0] 10676 v_1 := v.Args[1] 10677 if v_1.Op != OpAMD64ADDQconst { 10678 break 10679 } 10680 d := v_1.AuxInt 10681 ptr := v_1.Args[0] 10682 mem := v.Args[2] 10683 v.reset(OpAMD64MOVWloadidx1) 10684 v.AuxInt = c + d 10685 v.Aux = sym 10686 v.AddArg(ptr) 10687 v.AddArg(idx) 10688 v.AddArg(mem) 10689 return true 10690 } 10691 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 10692 // cond: 10693 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 10694 for { 10695 c := v.AuxInt 10696 sym := v.Aux 10697 ptr := v.Args[0] 10698 v_1 := v.Args[1] 10699 if v_1.Op != OpAMD64ADDQconst { 10700 break 10701 } 10702 d := v_1.AuxInt 10703 idx := v_1.Args[0] 10704 mem := v.Args[2] 10705 v.reset(OpAMD64MOVWloadidx1) 10706 v.AuxInt = c + d 10707 v.Aux = sym 10708 v.AddArg(ptr) 10709 v.AddArg(idx) 10710 v.AddArg(mem) 10711 return true 10712 } 10713 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 10714 // cond: 10715 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 10716 for { 10717 c := v.AuxInt 10718 sym := v.Aux 10719 v_0 := v.Args[0] 10720 if v_0.Op != OpAMD64ADDQconst { 10721 break 10722 } 10723 d := v_0.AuxInt 10724 idx := v_0.Args[0] 10725 ptr := v.Args[1] 10726 mem := v.Args[2] 10727 v.reset(OpAMD64MOVWloadidx1) 10728 v.AuxInt = c + d 10729 v.Aux = sym 10730 v.AddArg(ptr) 10731 v.AddArg(idx) 10732 v.AddArg(mem) 10733 return true 10734 } 10735 return false 10736 } 10737 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 10738 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 10739 // cond: 10740 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 10741 for { 10742 c := v.AuxInt 10743 sym := v.Aux 10744 v_0 := v.Args[0] 10745 if v_0.Op != OpAMD64ADDQconst { 10746 break 10747 } 10748 d := v_0.AuxInt 10749 ptr := v_0.Args[0] 10750 idx := v.Args[1] 10751 mem := v.Args[2] 10752 v.reset(OpAMD64MOVWloadidx2) 10753 v.AuxInt = c + d 10754 v.Aux = sym 10755 v.AddArg(ptr) 10756 v.AddArg(idx) 10757 v.AddArg(mem) 10758 return true 10759 } 10760 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 10761 // cond: 10762 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 10763 for { 10764 c := v.AuxInt 10765 sym := v.Aux 10766 ptr := v.Args[0] 10767 v_1 := v.Args[1] 10768 if v_1.Op != OpAMD64ADDQconst { 10769 break 10770 } 10771 d := v_1.AuxInt 10772 idx := v_1.Args[0] 10773 mem := v.Args[2] 10774 v.reset(OpAMD64MOVWloadidx2) 10775 v.AuxInt = c + 2*d 10776 v.Aux = sym 10777 v.AddArg(ptr) 10778 v.AddArg(idx) 10779 v.AddArg(mem) 10780 return true 10781 } 10782 return false 10783 } 10784 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 10785 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 10786 // cond: 10787 // result: (MOVWstore [off] {sym} ptr x mem) 10788 for { 10789 off := v.AuxInt 10790 sym := v.Aux 10791 ptr := v.Args[0] 10792 v_1 := v.Args[1] 10793 if v_1.Op != OpAMD64MOVWQSX { 10794 break 10795 } 10796 x := v_1.Args[0] 10797 mem := v.Args[2] 10798 v.reset(OpAMD64MOVWstore) 10799 v.AuxInt = off 10800 v.Aux = sym 10801 v.AddArg(ptr) 10802 v.AddArg(x) 10803 v.AddArg(mem) 10804 return true 10805 } 10806 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 10807 
// cond: 10808 // result: (MOVWstore [off] {sym} ptr x mem) 10809 for { 10810 off := v.AuxInt 10811 sym := v.Aux 10812 ptr := v.Args[0] 10813 v_1 := v.Args[1] 10814 if v_1.Op != OpAMD64MOVWQZX { 10815 break 10816 } 10817 x := v_1.Args[0] 10818 mem := v.Args[2] 10819 v.reset(OpAMD64MOVWstore) 10820 v.AuxInt = off 10821 v.Aux = sym 10822 v.AddArg(ptr) 10823 v.AddArg(x) 10824 v.AddArg(mem) 10825 return true 10826 } 10827 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 10828 // cond: is32Bit(off1+off2) 10829 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 10830 for { 10831 off1 := v.AuxInt 10832 sym := v.Aux 10833 v_0 := v.Args[0] 10834 if v_0.Op != OpAMD64ADDQconst { 10835 break 10836 } 10837 off2 := v_0.AuxInt 10838 ptr := v_0.Args[0] 10839 val := v.Args[1] 10840 mem := v.Args[2] 10841 if !(is32Bit(off1 + off2)) { 10842 break 10843 } 10844 v.reset(OpAMD64MOVWstore) 10845 v.AuxInt = off1 + off2 10846 v.Aux = sym 10847 v.AddArg(ptr) 10848 v.AddArg(val) 10849 v.AddArg(mem) 10850 return true 10851 } 10852 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 10853 // cond: validOff(off) 10854 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 10855 for { 10856 off := v.AuxInt 10857 sym := v.Aux 10858 ptr := v.Args[0] 10859 v_1 := v.Args[1] 10860 if v_1.Op != OpAMD64MOVLconst { 10861 break 10862 } 10863 c := v_1.AuxInt 10864 mem := v.Args[2] 10865 if !(validOff(off)) { 10866 break 10867 } 10868 v.reset(OpAMD64MOVWstoreconst) 10869 v.AuxInt = makeValAndOff(int64(int16(c)), off) 10870 v.Aux = sym 10871 v.AddArg(ptr) 10872 v.AddArg(mem) 10873 return true 10874 } 10875 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 10876 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10877 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10878 for { 10879 off1 := v.AuxInt 10880 sym1 := v.Aux 10881 v_0 := v.Args[0] 10882 if v_0.Op != OpAMD64LEAQ { 10883 break 10884 } 10885 off2 := v_0.AuxInt 10886 sym2 := v_0.Aux 10887 base := v_0.Args[0] 10888 val := v.Args[1] 10889 mem := v.Args[2] 10890 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10891 break 10892 } 10893 v.reset(OpAMD64MOVWstore) 10894 v.AuxInt = off1 + off2 10895 v.Aux = mergeSym(sym1, sym2) 10896 v.AddArg(base) 10897 v.AddArg(val) 10898 v.AddArg(mem) 10899 return true 10900 } 10901 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 10902 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10903 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10904 for { 10905 off1 := v.AuxInt 10906 sym1 := v.Aux 10907 v_0 := v.Args[0] 10908 if v_0.Op != OpAMD64LEAQ1 { 10909 break 10910 } 10911 off2 := v_0.AuxInt 10912 sym2 := v_0.Aux 10913 ptr := v_0.Args[0] 10914 idx := v_0.Args[1] 10915 val := v.Args[1] 10916 mem := v.Args[2] 10917 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10918 break 10919 } 10920 v.reset(OpAMD64MOVWstoreidx1) 10921 v.AuxInt = off1 + off2 10922 v.Aux = mergeSym(sym1, sym2) 10923 v.AddArg(ptr) 10924 v.AddArg(idx) 10925 v.AddArg(val) 10926 v.AddArg(mem) 10927 return true 10928 } 10929 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 10930 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10931 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 10932 for { 10933 off1 := v.AuxInt 10934 sym1 := v.Aux 10935 v_0 := v.Args[0] 10936 if v_0.Op != OpAMD64LEAQ2 { 10937 break 10938 } 10939 off2 := v_0.AuxInt 10940 sym2 := v_0.Aux 10941 
ptr := v_0.Args[0] 10942 idx := v_0.Args[1] 10943 val := v.Args[1] 10944 mem := v.Args[2] 10945 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 10946 break 10947 } 10948 v.reset(OpAMD64MOVWstoreidx2) 10949 v.AuxInt = off1 + off2 10950 v.Aux = mergeSym(sym1, sym2) 10951 v.AddArg(ptr) 10952 v.AddArg(idx) 10953 v.AddArg(val) 10954 v.AddArg(mem) 10955 return true 10956 } 10957 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 10958 // cond: ptr.Op != OpSB 10959 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 10960 for { 10961 off := v.AuxInt 10962 sym := v.Aux 10963 v_0 := v.Args[0] 10964 if v_0.Op != OpAMD64ADDQ { 10965 break 10966 } 10967 ptr := v_0.Args[0] 10968 idx := v_0.Args[1] 10969 val := v.Args[1] 10970 mem := v.Args[2] 10971 if !(ptr.Op != OpSB) { 10972 break 10973 } 10974 v.reset(OpAMD64MOVWstoreidx1) 10975 v.AuxInt = off 10976 v.Aux = sym 10977 v.AddArg(ptr) 10978 v.AddArg(idx) 10979 v.AddArg(val) 10980 v.AddArg(mem) 10981 return true 10982 } 10983 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 10984 // cond: x.Uses == 1 && clobber(x) 10985 // result: (MOVLstore [i-2] {s} p w mem) 10986 for { 10987 i := v.AuxInt 10988 s := v.Aux 10989 p := v.Args[0] 10990 v_1 := v.Args[1] 10991 if v_1.Op != OpAMD64SHRQconst { 10992 break 10993 } 10994 if v_1.AuxInt != 16 { 10995 break 10996 } 10997 w := v_1.Args[0] 10998 x := v.Args[2] 10999 if x.Op != OpAMD64MOVWstore { 11000 break 11001 } 11002 if x.AuxInt != i-2 { 11003 break 11004 } 11005 if x.Aux != s { 11006 break 11007 } 11008 if p != x.Args[0] { 11009 break 11010 } 11011 if w != x.Args[1] { 11012 break 11013 } 11014 mem := x.Args[2] 11015 if !(x.Uses == 1 && clobber(x)) { 11016 break 11017 } 11018 v.reset(OpAMD64MOVLstore) 11019 v.AuxInt = i - 2 11020 v.Aux = s 11021 v.AddArg(p) 11022 v.AddArg(w) 11023 v.AddArg(mem) 11024 return true 11025 } 11026 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 11027 // cond: x.Uses == 1 && clobber(x) 11028 // result: (MOVLstore [i-2] {s} p w0 mem) 11029 for { 11030 i := v.AuxInt 11031 s := v.Aux 11032 p := v.Args[0] 11033 v_1 := v.Args[1] 11034 if v_1.Op != OpAMD64SHRQconst { 11035 break 11036 } 11037 j := v_1.AuxInt 11038 w := v_1.Args[0] 11039 x := v.Args[2] 11040 if x.Op != OpAMD64MOVWstore { 11041 break 11042 } 11043 if x.AuxInt != i-2 { 11044 break 11045 } 11046 if x.Aux != s { 11047 break 11048 } 11049 if p != x.Args[0] { 11050 break 11051 } 11052 w0 := x.Args[1] 11053 if w0.Op != OpAMD64SHRQconst { 11054 break 11055 } 11056 if w0.AuxInt != j-16 { 11057 break 11058 } 11059 if w != w0.Args[0] { 11060 break 11061 } 11062 mem := x.Args[2] 11063 if !(x.Uses == 1 && clobber(x)) { 11064 break 11065 } 11066 v.reset(OpAMD64MOVLstore) 11067 v.AuxInt = i - 2 11068 v.Aux = s 11069 v.AddArg(p) 11070 v.AddArg(w0) 11071 v.AddArg(mem) 11072 return true 11073 } 11074 return false 11075 } 11076 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 11077 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 11078 // cond: canMergeSym(sym1, sym2) 11079 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11080 for { 11081 off1 := v.AuxInt 11082 sym1 := v.Aux 11083 v_0 := v.Args[0] 11084 if v_0.Op != OpAMD64LEAL { 11085 break 11086 } 11087 off2 := v_0.AuxInt 11088 sym2 := v_0.Aux 11089 base := v_0.Args[0] 11090 val := v.Args[1] 11091 mem := v.Args[2] 11092 if !(canMergeSym(sym1, sym2)) { 11093 break 11094 } 11095 v.reset(OpAMD64MOVWstore) 11096 v.AuxInt = off1 + off2 
11097 v.Aux = mergeSym(sym1, sym2) 11098 v.AddArg(base) 11099 v.AddArg(val) 11100 v.AddArg(mem) 11101 return true 11102 } 11103 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 11104 // cond: is32Bit(off1+off2) 11105 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 11106 for { 11107 off1 := v.AuxInt 11108 sym := v.Aux 11109 v_0 := v.Args[0] 11110 if v_0.Op != OpAMD64ADDLconst { 11111 break 11112 } 11113 off2 := v_0.AuxInt 11114 ptr := v_0.Args[0] 11115 val := v.Args[1] 11116 mem := v.Args[2] 11117 if !(is32Bit(off1 + off2)) { 11118 break 11119 } 11120 v.reset(OpAMD64MOVWstore) 11121 v.AuxInt = off1 + off2 11122 v.Aux = sym 11123 v.AddArg(ptr) 11124 v.AddArg(val) 11125 v.AddArg(mem) 11126 return true 11127 } 11128 return false 11129 } 11130 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { 11131 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 11132 // cond: ValAndOff(sc).canAdd(off) 11133 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 11134 for { 11135 sc := v.AuxInt 11136 s := v.Aux 11137 v_0 := v.Args[0] 11138 if v_0.Op != OpAMD64ADDQconst { 11139 break 11140 } 11141 off := v_0.AuxInt 11142 ptr := v_0.Args[0] 11143 mem := v.Args[1] 11144 if !(ValAndOff(sc).canAdd(off)) { 11145 break 11146 } 11147 v.reset(OpAMD64MOVWstoreconst) 11148 v.AuxInt = ValAndOff(sc).add(off) 11149 v.Aux = s 11150 v.AddArg(ptr) 11151 v.AddArg(mem) 11152 return true 11153 } 11154 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 11155 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 11156 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 11157 for { 11158 sc := v.AuxInt 11159 sym1 := v.Aux 11160 v_0 := v.Args[0] 11161 if v_0.Op != OpAMD64LEAQ { 11162 break 11163 } 11164 off := v_0.AuxInt 11165 sym2 := v_0.Aux 11166 ptr := v_0.Args[0] 11167 mem := v.Args[1] 11168 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 11169 break 11170 } 11171 v.reset(OpAMD64MOVWstoreconst) 11172 v.AuxInt = ValAndOff(sc).add(off) 11173 v.Aux = mergeSym(sym1, sym2) 11174 v.AddArg(ptr) 11175 v.AddArg(mem) 11176 return true 11177 } 11178 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 11179 // cond: canMergeSym(sym1, sym2) 11180 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 11181 for { 11182 x := v.AuxInt 11183 sym1 := v.Aux 11184 v_0 := v.Args[0] 11185 if v_0.Op != OpAMD64LEAQ1 { 11186 break 11187 } 11188 off := v_0.AuxInt 11189 sym2 := v_0.Aux 11190 ptr := v_0.Args[0] 11191 idx := v_0.Args[1] 11192 mem := v.Args[1] 11193 if !(canMergeSym(sym1, sym2)) { 11194 break 11195 } 11196 v.reset(OpAMD64MOVWstoreconstidx1) 11197 v.AuxInt = ValAndOff(x).add(off) 11198 v.Aux = mergeSym(sym1, sym2) 11199 v.AddArg(ptr) 11200 v.AddArg(idx) 11201 v.AddArg(mem) 11202 return true 11203 } 11204 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 11205 // cond: canMergeSym(sym1, sym2) 11206 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 11207 for { 11208 x := v.AuxInt 11209 sym1 := v.Aux 11210 v_0 := v.Args[0] 11211 if v_0.Op != OpAMD64LEAQ2 { 11212 break 11213 } 11214 off := v_0.AuxInt 11215 sym2 := v_0.Aux 11216 ptr := v_0.Args[0] 11217 idx := v_0.Args[1] 11218 mem := v.Args[1] 11219 if !(canMergeSym(sym1, sym2)) { 11220 break 11221 } 11222 v.reset(OpAMD64MOVWstoreconstidx2) 11223 v.AuxInt = ValAndOff(x).add(off) 11224 v.Aux = mergeSym(sym1, sym2) 11225 v.AddArg(ptr) 11226 v.AddArg(idx) 11227 
v.AddArg(mem) 11228 return true 11229 } 11230 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 11231 // cond: 11232 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 11233 for { 11234 x := v.AuxInt 11235 sym := v.Aux 11236 v_0 := v.Args[0] 11237 if v_0.Op != OpAMD64ADDQ { 11238 break 11239 } 11240 ptr := v_0.Args[0] 11241 idx := v_0.Args[1] 11242 mem := v.Args[1] 11243 v.reset(OpAMD64MOVWstoreconstidx1) 11244 v.AuxInt = x 11245 v.Aux = sym 11246 v.AddArg(ptr) 11247 v.AddArg(idx) 11248 v.AddArg(mem) 11249 return true 11250 } 11251 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 11252 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 11253 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 11254 for { 11255 c := v.AuxInt 11256 s := v.Aux 11257 p := v.Args[0] 11258 x := v.Args[1] 11259 if x.Op != OpAMD64MOVWstoreconst { 11260 break 11261 } 11262 a := x.AuxInt 11263 if x.Aux != s { 11264 break 11265 } 11266 if p != x.Args[0] { 11267 break 11268 } 11269 mem := x.Args[1] 11270 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 11271 break 11272 } 11273 v.reset(OpAMD64MOVLstoreconst) 11274 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 11275 v.Aux = s 11276 v.AddArg(p) 11277 v.AddArg(mem) 11278 return true 11279 } 11280 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 11281 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 11282 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 11283 for { 11284 sc := v.AuxInt 11285 sym1 := v.Aux 11286 v_0 := v.Args[0] 11287 if v_0.Op != OpAMD64LEAL { 11288 break 11289 } 11290 off := v_0.AuxInt 11291 sym2 := v_0.Aux 11292 ptr := v_0.Args[0] 11293 mem := v.Args[1] 11294 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 11295 break 11296 } 11297 v.reset(OpAMD64MOVWstoreconst) 11298 v.AuxInt = ValAndOff(sc).add(off) 11299 v.Aux = mergeSym(sym1, sym2) 11300 v.AddArg(ptr) 11301 v.AddArg(mem) 11302 return true 11303 } 11304 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 11305 // cond: ValAndOff(sc).canAdd(off) 11306 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 11307 for { 11308 sc := v.AuxInt 11309 s := v.Aux 11310 v_0 := v.Args[0] 11311 if v_0.Op != OpAMD64ADDLconst { 11312 break 11313 } 11314 off := v_0.AuxInt 11315 ptr := v_0.Args[0] 11316 mem := v.Args[1] 11317 if !(ValAndOff(sc).canAdd(off)) { 11318 break 11319 } 11320 v.reset(OpAMD64MOVWstoreconst) 11321 v.AuxInt = ValAndOff(sc).add(off) 11322 v.Aux = s 11323 v.AddArg(ptr) 11324 v.AddArg(mem) 11325 return true 11326 } 11327 return false 11328 } 11329 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { 11330 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 11331 // cond: 11332 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 11333 for { 11334 c := v.AuxInt 11335 sym := v.Aux 11336 ptr := v.Args[0] 11337 v_1 := v.Args[1] 11338 if v_1.Op != OpAMD64SHLQconst { 11339 break 11340 } 11341 if v_1.AuxInt != 1 { 11342 break 11343 } 11344 idx := v_1.Args[0] 11345 mem := v.Args[2] 11346 v.reset(OpAMD64MOVWstoreconstidx2) 11347 v.AuxInt = c 11348 v.Aux = sym 11349 v.AddArg(ptr) 11350 v.AddArg(idx) 11351 v.AddArg(mem) 11352 return true 11353 } 11354 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 11355 // cond: 11356 // result: 
(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 11357 for { 11358 x := v.AuxInt 11359 sym := v.Aux 11360 v_0 := v.Args[0] 11361 if v_0.Op != OpAMD64ADDQconst { 11362 break 11363 } 11364 c := v_0.AuxInt 11365 ptr := v_0.Args[0] 11366 idx := v.Args[1] 11367 mem := v.Args[2] 11368 v.reset(OpAMD64MOVWstoreconstidx1) 11369 v.AuxInt = ValAndOff(x).add(c) 11370 v.Aux = sym 11371 v.AddArg(ptr) 11372 v.AddArg(idx) 11373 v.AddArg(mem) 11374 return true 11375 } 11376 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 11377 // cond: 11378 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 11379 for { 11380 x := v.AuxInt 11381 sym := v.Aux 11382 ptr := v.Args[0] 11383 v_1 := v.Args[1] 11384 if v_1.Op != OpAMD64ADDQconst { 11385 break 11386 } 11387 c := v_1.AuxInt 11388 idx := v_1.Args[0] 11389 mem := v.Args[2] 11390 v.reset(OpAMD64MOVWstoreconstidx1) 11391 v.AuxInt = ValAndOff(x).add(c) 11392 v.Aux = sym 11393 v.AddArg(ptr) 11394 v.AddArg(idx) 11395 v.AddArg(mem) 11396 return true 11397 } 11398 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 11399 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 11400 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 11401 for { 11402 c := v.AuxInt 11403 s := v.Aux 11404 p := v.Args[0] 11405 i := v.Args[1] 11406 x := v.Args[2] 11407 if x.Op != OpAMD64MOVWstoreconstidx1 { 11408 break 11409 } 11410 a := x.AuxInt 11411 if x.Aux != s { 11412 break 11413 } 11414 if p != x.Args[0] { 11415 break 11416 } 11417 if i != x.Args[1] { 11418 break 11419 } 11420 mem := x.Args[2] 11421 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 11422 break 11423 } 11424 v.reset(OpAMD64MOVLstoreconstidx1) 11425 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 11426 v.Aux = s 11427 v.AddArg(p) 11428 v.AddArg(i) 11429 v.AddArg(mem) 11430 return true 11431 } 11432 return false 11433 } 11434 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { 11435 b := v.Block 11436 _ = b 11437 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 11438 // cond: 11439 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 11440 for { 11441 x := v.AuxInt 11442 sym := v.Aux 11443 v_0 := v.Args[0] 11444 if v_0.Op != OpAMD64ADDQconst { 11445 break 11446 } 11447 c := v_0.AuxInt 11448 ptr := v_0.Args[0] 11449 idx := v.Args[1] 11450 mem := v.Args[2] 11451 v.reset(OpAMD64MOVWstoreconstidx2) 11452 v.AuxInt = ValAndOff(x).add(c) 11453 v.Aux = sym 11454 v.AddArg(ptr) 11455 v.AddArg(idx) 11456 v.AddArg(mem) 11457 return true 11458 } 11459 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 11460 // cond: 11461 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 11462 for { 11463 x := v.AuxInt 11464 sym := v.Aux 11465 ptr := v.Args[0] 11466 v_1 := v.Args[1] 11467 if v_1.Op != OpAMD64ADDQconst { 11468 break 11469 } 11470 c := v_1.AuxInt 11471 idx := v_1.Args[0] 11472 mem := v.Args[2] 11473 v.reset(OpAMD64MOVWstoreconstidx2) 11474 v.AuxInt = ValAndOff(x).add(2 * c) 11475 v.Aux = sym 11476 v.AddArg(ptr) 11477 v.AddArg(idx) 11478 v.AddArg(mem) 11479 return true 11480 } 11481 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 11482 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 11483 // result: 
(MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 11484 for { 11485 c := v.AuxInt 11486 s := v.Aux 11487 p := v.Args[0] 11488 i := v.Args[1] 11489 x := v.Args[2] 11490 if x.Op != OpAMD64MOVWstoreconstidx2 { 11491 break 11492 } 11493 a := x.AuxInt 11494 if x.Aux != s { 11495 break 11496 } 11497 if p != x.Args[0] { 11498 break 11499 } 11500 if i != x.Args[1] { 11501 break 11502 } 11503 mem := x.Args[2] 11504 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 11505 break 11506 } 11507 v.reset(OpAMD64MOVLstoreconstidx1) 11508 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 11509 v.Aux = s 11510 v.AddArg(p) 11511 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 11512 v0.AuxInt = 1 11513 v0.AddArg(i) 11514 v.AddArg(v0) 11515 v.AddArg(mem) 11516 return true 11517 } 11518 return false 11519 } 11520 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { 11521 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 11522 // cond: 11523 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 11524 for { 11525 c := v.AuxInt 11526 sym := v.Aux 11527 ptr := v.Args[0] 11528 v_1 := v.Args[1] 11529 if v_1.Op != OpAMD64SHLQconst { 11530 break 11531 } 11532 if v_1.AuxInt != 1 { 11533 break 11534 } 11535 idx := v_1.Args[0] 11536 val := v.Args[2] 11537 mem := v.Args[3] 11538 v.reset(OpAMD64MOVWstoreidx2) 11539 v.AuxInt = c 11540 v.Aux = sym 11541 v.AddArg(ptr) 11542 v.AddArg(idx) 11543 v.AddArg(val) 11544 v.AddArg(mem) 11545 return true 11546 } 11547 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 11548 // cond: 11549 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 11550 for { 11551 c := v.AuxInt 11552 sym := v.Aux 11553 v_0 := v.Args[0] 11554 if v_0.Op != OpAMD64ADDQconst { 11555 break 11556 } 11557 d := v_0.AuxInt 11558 ptr := v_0.Args[0] 11559 idx := v.Args[1] 11560 val := v.Args[2] 11561 mem := v.Args[3] 11562 v.reset(OpAMD64MOVWstoreidx1) 11563 v.AuxInt = c + d 11564 v.Aux = sym 11565 v.AddArg(ptr) 11566 v.AddArg(idx) 11567 v.AddArg(val) 11568 v.AddArg(mem) 11569 return true 11570 } 11571 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 11572 // cond: 11573 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 11574 for { 11575 c := v.AuxInt 11576 sym := v.Aux 11577 ptr := v.Args[0] 11578 v_1 := v.Args[1] 11579 if v_1.Op != OpAMD64ADDQconst { 11580 break 11581 } 11582 d := v_1.AuxInt 11583 idx := v_1.Args[0] 11584 val := v.Args[2] 11585 mem := v.Args[3] 11586 v.reset(OpAMD64MOVWstoreidx1) 11587 v.AuxInt = c + d 11588 v.Aux = sym 11589 v.AddArg(ptr) 11590 v.AddArg(idx) 11591 v.AddArg(val) 11592 v.AddArg(mem) 11593 return true 11594 } 11595 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 11596 // cond: x.Uses == 1 && clobber(x) 11597 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 11598 for { 11599 i := v.AuxInt 11600 s := v.Aux 11601 p := v.Args[0] 11602 idx := v.Args[1] 11603 v_2 := v.Args[2] 11604 if v_2.Op != OpAMD64SHRQconst { 11605 break 11606 } 11607 if v_2.AuxInt != 16 { 11608 break 11609 } 11610 w := v_2.Args[0] 11611 x := v.Args[3] 11612 if x.Op != OpAMD64MOVWstoreidx1 { 11613 break 11614 } 11615 if x.AuxInt != i-2 { 11616 break 11617 } 11618 if x.Aux != s { 11619 break 11620 } 11621 if p != x.Args[0] { 11622 break 11623 } 11624 if idx != x.Args[1] { 11625 break 11626 } 11627 if w != x.Args[2] { 11628 break 11629 
} 11630 mem := x.Args[3] 11631 if !(x.Uses == 1 && clobber(x)) { 11632 break 11633 } 11634 v.reset(OpAMD64MOVLstoreidx1) 11635 v.AuxInt = i - 2 11636 v.Aux = s 11637 v.AddArg(p) 11638 v.AddArg(idx) 11639 v.AddArg(w) 11640 v.AddArg(mem) 11641 return true 11642 } 11643 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 11644 // cond: x.Uses == 1 && clobber(x) 11645 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 11646 for { 11647 i := v.AuxInt 11648 s := v.Aux 11649 p := v.Args[0] 11650 idx := v.Args[1] 11651 v_2 := v.Args[2] 11652 if v_2.Op != OpAMD64SHRQconst { 11653 break 11654 } 11655 j := v_2.AuxInt 11656 w := v_2.Args[0] 11657 x := v.Args[3] 11658 if x.Op != OpAMD64MOVWstoreidx1 { 11659 break 11660 } 11661 if x.AuxInt != i-2 { 11662 break 11663 } 11664 if x.Aux != s { 11665 break 11666 } 11667 if p != x.Args[0] { 11668 break 11669 } 11670 if idx != x.Args[1] { 11671 break 11672 } 11673 w0 := x.Args[2] 11674 if w0.Op != OpAMD64SHRQconst { 11675 break 11676 } 11677 if w0.AuxInt != j-16 { 11678 break 11679 } 11680 if w != w0.Args[0] { 11681 break 11682 } 11683 mem := x.Args[3] 11684 if !(x.Uses == 1 && clobber(x)) { 11685 break 11686 } 11687 v.reset(OpAMD64MOVLstoreidx1) 11688 v.AuxInt = i - 2 11689 v.Aux = s 11690 v.AddArg(p) 11691 v.AddArg(idx) 11692 v.AddArg(w0) 11693 v.AddArg(mem) 11694 return true 11695 } 11696 return false 11697 } 11698 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { 11699 b := v.Block 11700 _ = b 11701 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 11702 // cond: 11703 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 11704 for { 11705 c := v.AuxInt 11706 sym := v.Aux 11707 v_0 := v.Args[0] 11708 if v_0.Op != OpAMD64ADDQconst { 11709 break 11710 } 11711 d := v_0.AuxInt 11712 ptr := v_0.Args[0] 11713 idx := v.Args[1] 11714 val := v.Args[2] 11715 mem := v.Args[3] 11716 v.reset(OpAMD64MOVWstoreidx2) 11717 v.AuxInt = c + d 11718 v.Aux = sym 11719 v.AddArg(ptr) 11720 v.AddArg(idx) 11721 v.AddArg(val) 11722 v.AddArg(mem) 11723 return true 11724 } 11725 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 11726 // cond: 11727 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 11728 for { 11729 c := v.AuxInt 11730 sym := v.Aux 11731 ptr := v.Args[0] 11732 v_1 := v.Args[1] 11733 if v_1.Op != OpAMD64ADDQconst { 11734 break 11735 } 11736 d := v_1.AuxInt 11737 idx := v_1.Args[0] 11738 val := v.Args[2] 11739 mem := v.Args[3] 11740 v.reset(OpAMD64MOVWstoreidx2) 11741 v.AuxInt = c + 2*d 11742 v.Aux = sym 11743 v.AddArg(ptr) 11744 v.AddArg(idx) 11745 v.AddArg(val) 11746 v.AddArg(mem) 11747 return true 11748 } 11749 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 11750 // cond: x.Uses == 1 && clobber(x) 11751 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 11752 for { 11753 i := v.AuxInt 11754 s := v.Aux 11755 p := v.Args[0] 11756 idx := v.Args[1] 11757 v_2 := v.Args[2] 11758 if v_2.Op != OpAMD64SHRQconst { 11759 break 11760 } 11761 if v_2.AuxInt != 16 { 11762 break 11763 } 11764 w := v_2.Args[0] 11765 x := v.Args[3] 11766 if x.Op != OpAMD64MOVWstoreidx2 { 11767 break 11768 } 11769 if x.AuxInt != i-2 { 11770 break 11771 } 11772 if x.Aux != s { 11773 break 11774 } 11775 if p != x.Args[0] { 11776 break 11777 } 11778 if idx != x.Args[1] { 11779 break 11780 } 11781 if w != x.Args[2] { 11782 break 11783 } 11784 mem := x.Args[3] 11785 if !(x.Uses == 1 && clobber(x)) { 11786 break 
11787 } 11788 v.reset(OpAMD64MOVLstoreidx1) 11789 v.AuxInt = i - 2 11790 v.Aux = s 11791 v.AddArg(p) 11792 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 11793 v0.AuxInt = 1 11794 v0.AddArg(idx) 11795 v.AddArg(v0) 11796 v.AddArg(w) 11797 v.AddArg(mem) 11798 return true 11799 } 11800 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 11801 // cond: x.Uses == 1 && clobber(x) 11802 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 11803 for { 11804 i := v.AuxInt 11805 s := v.Aux 11806 p := v.Args[0] 11807 idx := v.Args[1] 11808 v_2 := v.Args[2] 11809 if v_2.Op != OpAMD64SHRQconst { 11810 break 11811 } 11812 j := v_2.AuxInt 11813 w := v_2.Args[0] 11814 x := v.Args[3] 11815 if x.Op != OpAMD64MOVWstoreidx2 { 11816 break 11817 } 11818 if x.AuxInt != i-2 { 11819 break 11820 } 11821 if x.Aux != s { 11822 break 11823 } 11824 if p != x.Args[0] { 11825 break 11826 } 11827 if idx != x.Args[1] { 11828 break 11829 } 11830 w0 := x.Args[2] 11831 if w0.Op != OpAMD64SHRQconst { 11832 break 11833 } 11834 if w0.AuxInt != j-16 { 11835 break 11836 } 11837 if w != w0.Args[0] { 11838 break 11839 } 11840 mem := x.Args[3] 11841 if !(x.Uses == 1 && clobber(x)) { 11842 break 11843 } 11844 v.reset(OpAMD64MOVLstoreidx1) 11845 v.AuxInt = i - 2 11846 v.Aux = s 11847 v.AddArg(p) 11848 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 11849 v0.AuxInt = 1 11850 v0.AddArg(idx) 11851 v.AddArg(v0) 11852 v.AddArg(w0) 11853 v.AddArg(mem) 11854 return true 11855 } 11856 return false 11857 } 11858 func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { 11859 // match: (MULL x (MOVLconst [c])) 11860 // cond: 11861 // result: (MULLconst [c] x) 11862 for { 11863 x := v.Args[0] 11864 v_1 := v.Args[1] 11865 if v_1.Op != OpAMD64MOVLconst { 11866 break 11867 } 11868 c := v_1.AuxInt 11869 v.reset(OpAMD64MULLconst) 11870 v.AuxInt = c 11871 v.AddArg(x) 11872 return true 11873 } 11874 // match: (MULL (MOVLconst [c]) x) 11875 // cond: 11876 // result: (MULLconst [c] x) 11877 for { 11878 v_0 := v.Args[0] 11879 if v_0.Op != OpAMD64MOVLconst { 11880 break 11881 } 11882 c := v_0.AuxInt 11883 x := v.Args[1] 11884 v.reset(OpAMD64MULLconst) 11885 v.AuxInt = c 11886 v.AddArg(x) 11887 return true 11888 } 11889 return false 11890 } 11891 func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { 11892 // match: (MULLconst [c] (MULLconst [d] x)) 11893 // cond: 11894 // result: (MULLconst [int64(int32(c * d))] x) 11895 for { 11896 c := v.AuxInt 11897 v_0 := v.Args[0] 11898 if v_0.Op != OpAMD64MULLconst { 11899 break 11900 } 11901 d := v_0.AuxInt 11902 x := v_0.Args[0] 11903 v.reset(OpAMD64MULLconst) 11904 v.AuxInt = int64(int32(c * d)) 11905 v.AddArg(x) 11906 return true 11907 } 11908 // match: (MULLconst [c] (MOVLconst [d])) 11909 // cond: 11910 // result: (MOVLconst [int64(int32(c*d))]) 11911 for { 11912 c := v.AuxInt 11913 v_0 := v.Args[0] 11914 if v_0.Op != OpAMD64MOVLconst { 11915 break 11916 } 11917 d := v_0.AuxInt 11918 v.reset(OpAMD64MOVLconst) 11919 v.AuxInt = int64(int32(c * d)) 11920 return true 11921 } 11922 return false 11923 } 11924 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { 11925 // match: (MULQ x (MOVQconst [c])) 11926 // cond: is32Bit(c) 11927 // result: (MULQconst [c] x) 11928 for { 11929 x := v.Args[0] 11930 v_1 := v.Args[1] 11931 if v_1.Op != OpAMD64MOVQconst { 11932 break 11933 } 11934 c := v_1.AuxInt 11935 if !(is32Bit(c)) { 11936 break 11937 } 11938 v.reset(OpAMD64MULQconst) 11939 v.AuxInt = c 11940 v.AddArg(x) 11941 
return true 11942 } 11943 // match: (MULQ (MOVQconst [c]) x) 11944 // cond: is32Bit(c) 11945 // result: (MULQconst [c] x) 11946 for { 11947 v_0 := v.Args[0] 11948 if v_0.Op != OpAMD64MOVQconst { 11949 break 11950 } 11951 c := v_0.AuxInt 11952 x := v.Args[1] 11953 if !(is32Bit(c)) { 11954 break 11955 } 11956 v.reset(OpAMD64MULQconst) 11957 v.AuxInt = c 11958 v.AddArg(x) 11959 return true 11960 } 11961 return false 11962 } 11963 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { 11964 b := v.Block 11965 _ = b 11966 // match: (MULQconst [c] (MULQconst [d] x)) 11967 // cond: is32Bit(c*d) 11968 // result: (MULQconst [c * d] x) 11969 for { 11970 c := v.AuxInt 11971 v_0 := v.Args[0] 11972 if v_0.Op != OpAMD64MULQconst { 11973 break 11974 } 11975 d := v_0.AuxInt 11976 x := v_0.Args[0] 11977 if !(is32Bit(c * d)) { 11978 break 11979 } 11980 v.reset(OpAMD64MULQconst) 11981 v.AuxInt = c * d 11982 v.AddArg(x) 11983 return true 11984 } 11985 // match: (MULQconst [-1] x) 11986 // cond: 11987 // result: (NEGQ x) 11988 for { 11989 if v.AuxInt != -1 { 11990 break 11991 } 11992 x := v.Args[0] 11993 v.reset(OpAMD64NEGQ) 11994 v.AddArg(x) 11995 return true 11996 } 11997 // match: (MULQconst [0] _) 11998 // cond: 11999 // result: (MOVQconst [0]) 12000 for { 12001 if v.AuxInt != 0 { 12002 break 12003 } 12004 v.reset(OpAMD64MOVQconst) 12005 v.AuxInt = 0 12006 return true 12007 } 12008 // match: (MULQconst [1] x) 12009 // cond: 12010 // result: x 12011 for { 12012 if v.AuxInt != 1 { 12013 break 12014 } 12015 x := v.Args[0] 12016 v.reset(OpCopy) 12017 v.Type = x.Type 12018 v.AddArg(x) 12019 return true 12020 } 12021 // match: (MULQconst [3] x) 12022 // cond: 12023 // result: (LEAQ2 x x) 12024 for { 12025 if v.AuxInt != 3 { 12026 break 12027 } 12028 x := v.Args[0] 12029 v.reset(OpAMD64LEAQ2) 12030 v.AddArg(x) 12031 v.AddArg(x) 12032 return true 12033 } 12034 // match: (MULQconst [5] x) 12035 // cond: 12036 // result: (LEAQ4 x x) 12037 for { 12038 if v.AuxInt != 5 { 12039 break 12040 } 12041 x := v.Args[0] 12042 v.reset(OpAMD64LEAQ4) 12043 v.AddArg(x) 12044 v.AddArg(x) 12045 return true 12046 } 12047 // match: (MULQconst [7] x) 12048 // cond: 12049 // result: (LEAQ8 (NEGQ <v.Type> x) x) 12050 for { 12051 if v.AuxInt != 7 { 12052 break 12053 } 12054 x := v.Args[0] 12055 v.reset(OpAMD64LEAQ8) 12056 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) 12057 v0.AddArg(x) 12058 v.AddArg(v0) 12059 v.AddArg(x) 12060 return true 12061 } 12062 // match: (MULQconst [9] x) 12063 // cond: 12064 // result: (LEAQ8 x x) 12065 for { 12066 if v.AuxInt != 9 { 12067 break 12068 } 12069 x := v.Args[0] 12070 v.reset(OpAMD64LEAQ8) 12071 v.AddArg(x) 12072 v.AddArg(x) 12073 return true 12074 } 12075 // match: (MULQconst [11] x) 12076 // cond: 12077 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 12078 for { 12079 if v.AuxInt != 11 { 12080 break 12081 } 12082 x := v.Args[0] 12083 v.reset(OpAMD64LEAQ2) 12084 v.AddArg(x) 12085 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 12086 v0.AddArg(x) 12087 v0.AddArg(x) 12088 v.AddArg(v0) 12089 return true 12090 } 12091 // match: (MULQconst [13] x) 12092 // cond: 12093 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 12094 for { 12095 if v.AuxInt != 13 { 12096 break 12097 } 12098 x := v.Args[0] 12099 v.reset(OpAMD64LEAQ4) 12100 v.AddArg(x) 12101 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 12102 v0.AddArg(x) 12103 v0.AddArg(x) 12104 v.AddArg(v0) 12105 return true 12106 } 12107 return false 12108 } 12109 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { 12110 b := v.Block 12111 _ = b 12112 // match: 
(MULQconst [21] x) 12113 // cond: 12114 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 12115 for { 12116 if v.AuxInt != 21 { 12117 break 12118 } 12119 x := v.Args[0] 12120 v.reset(OpAMD64LEAQ4) 12121 v.AddArg(x) 12122 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 12123 v0.AddArg(x) 12124 v0.AddArg(x) 12125 v.AddArg(v0) 12126 return true 12127 } 12128 // match: (MULQconst [25] x) 12129 // cond: 12130 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 12131 for { 12132 if v.AuxInt != 25 { 12133 break 12134 } 12135 x := v.Args[0] 12136 v.reset(OpAMD64LEAQ8) 12137 v.AddArg(x) 12138 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 12139 v0.AddArg(x) 12140 v0.AddArg(x) 12141 v.AddArg(v0) 12142 return true 12143 } 12144 // match: (MULQconst [37] x) 12145 // cond: 12146 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 12147 for { 12148 if v.AuxInt != 37 { 12149 break 12150 } 12151 x := v.Args[0] 12152 v.reset(OpAMD64LEAQ4) 12153 v.AddArg(x) 12154 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 12155 v0.AddArg(x) 12156 v0.AddArg(x) 12157 v.AddArg(v0) 12158 return true 12159 } 12160 // match: (MULQconst [41] x) 12161 // cond: 12162 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 12163 for { 12164 if v.AuxInt != 41 { 12165 break 12166 } 12167 x := v.Args[0] 12168 v.reset(OpAMD64LEAQ8) 12169 v.AddArg(x) 12170 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 12171 v0.AddArg(x) 12172 v0.AddArg(x) 12173 v.AddArg(v0) 12174 return true 12175 } 12176 // match: (MULQconst [73] x) 12177 // cond: 12178 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 12179 for { 12180 if v.AuxInt != 73 { 12181 break 12182 } 12183 x := v.Args[0] 12184 v.reset(OpAMD64LEAQ8) 12185 v.AddArg(x) 12186 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 12187 v0.AddArg(x) 12188 v0.AddArg(x) 12189 v.AddArg(v0) 12190 return true 12191 } 12192 // match: (MULQconst [c] x) 12193 // cond: isPowerOfTwo(c) 12194 // result: (SHLQconst [log2(c)] x) 12195 for { 12196 c := v.AuxInt 12197 x := v.Args[0] 12198 if !(isPowerOfTwo(c)) { 12199 break 12200 } 12201 v.reset(OpAMD64SHLQconst) 12202 v.AuxInt = log2(c) 12203 v.AddArg(x) 12204 return true 12205 } 12206 // match: (MULQconst [c] x) 12207 // cond: isPowerOfTwo(c+1) && c >= 15 12208 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 12209 for { 12210 c := v.AuxInt 12211 x := v.Args[0] 12212 if !(isPowerOfTwo(c+1) && c >= 15) { 12213 break 12214 } 12215 v.reset(OpAMD64SUBQ) 12216 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 12217 v0.AuxInt = log2(c + 1) 12218 v0.AddArg(x) 12219 v.AddArg(v0) 12220 v.AddArg(x) 12221 return true 12222 } 12223 // match: (MULQconst [c] x) 12224 // cond: isPowerOfTwo(c-1) && c >= 17 12225 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 12226 for { 12227 c := v.AuxInt 12228 x := v.Args[0] 12229 if !(isPowerOfTwo(c-1) && c >= 17) { 12230 break 12231 } 12232 v.reset(OpAMD64LEAQ1) 12233 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 12234 v0.AuxInt = log2(c - 1) 12235 v0.AddArg(x) 12236 v.AddArg(v0) 12237 v.AddArg(x) 12238 return true 12239 } 12240 // match: (MULQconst [c] x) 12241 // cond: isPowerOfTwo(c-2) && c >= 34 12242 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 12243 for { 12244 c := v.AuxInt 12245 x := v.Args[0] 12246 if !(isPowerOfTwo(c-2) && c >= 34) { 12247 break 12248 } 12249 v.reset(OpAMD64LEAQ2) 12250 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 12251 v0.AuxInt = log2(c - 2) 12252 v0.AddArg(x) 12253 v.AddArg(v0) 12254 v.AddArg(x) 12255 return true 12256 } 12257 // match: (MULQconst [c] x) 12258 // cond: isPowerOfTwo(c-4) && c >= 68 12259 // result: (LEAQ4 
(SHLQconst <v.Type> [log2(c-4)] x) x) 12260 for { 12261 c := v.AuxInt 12262 x := v.Args[0] 12263 if !(isPowerOfTwo(c-4) && c >= 68) { 12264 break 12265 } 12266 v.reset(OpAMD64LEAQ4) 12267 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 12268 v0.AuxInt = log2(c - 4) 12269 v0.AddArg(x) 12270 v.AddArg(v0) 12271 v.AddArg(x) 12272 return true 12273 } 12274 return false 12275 } 12276 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { 12277 b := v.Block 12278 _ = b 12279 // match: (MULQconst [c] x) 12280 // cond: isPowerOfTwo(c-8) && c >= 136 12281 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 12282 for { 12283 c := v.AuxInt 12284 x := v.Args[0] 12285 if !(isPowerOfTwo(c-8) && c >= 136) { 12286 break 12287 } 12288 v.reset(OpAMD64LEAQ8) 12289 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 12290 v0.AuxInt = log2(c - 8) 12291 v0.AddArg(x) 12292 v.AddArg(v0) 12293 v.AddArg(x) 12294 return true 12295 } 12296 // match: (MULQconst [c] x) 12297 // cond: c%3 == 0 && isPowerOfTwo(c/3) 12298 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 12299 for { 12300 c := v.AuxInt 12301 x := v.Args[0] 12302 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 12303 break 12304 } 12305 v.reset(OpAMD64SHLQconst) 12306 v.AuxInt = log2(c / 3) 12307 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 12308 v0.AddArg(x) 12309 v0.AddArg(x) 12310 v.AddArg(v0) 12311 return true 12312 } 12313 // match: (MULQconst [c] x) 12314 // cond: c%5 == 0 && isPowerOfTwo(c/5) 12315 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 12316 for { 12317 c := v.AuxInt 12318 x := v.Args[0] 12319 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 12320 break 12321 } 12322 v.reset(OpAMD64SHLQconst) 12323 v.AuxInt = log2(c / 5) 12324 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 12325 v0.AddArg(x) 12326 v0.AddArg(x) 12327 v.AddArg(v0) 12328 return true 12329 } 12330 // match: (MULQconst [c] x) 12331 // cond: c%9 == 0 && isPowerOfTwo(c/9) 12332 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 12333 for { 12334 c := v.AuxInt 12335 x := v.Args[0] 12336 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 12337 break 12338 } 12339 v.reset(OpAMD64SHLQconst) 12340 v.AuxInt = log2(c / 9) 12341 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 12342 v0.AddArg(x) 12343 v0.AddArg(x) 12344 v.AddArg(v0) 12345 return true 12346 } 12347 // match: (MULQconst [c] (MOVQconst [d])) 12348 // cond: 12349 // result: (MOVQconst [c*d]) 12350 for { 12351 c := v.AuxInt 12352 v_0 := v.Args[0] 12353 if v_0.Op != OpAMD64MOVQconst { 12354 break 12355 } 12356 d := v_0.AuxInt 12357 v.reset(OpAMD64MOVQconst) 12358 v.AuxInt = c * d 12359 return true 12360 } 12361 return false 12362 } 12363 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 12364 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) 12365 // cond: canMergeLoad(v, l, x) && clobber(l) 12366 // result: (MULSDmem x [off] {sym} ptr mem) 12367 for { 12368 x := v.Args[0] 12369 l := v.Args[1] 12370 if l.Op != OpAMD64MOVSDload { 12371 break 12372 } 12373 off := l.AuxInt 12374 sym := l.Aux 12375 ptr := l.Args[0] 12376 mem := l.Args[1] 12377 if !(canMergeLoad(v, l, x) && clobber(l)) { 12378 break 12379 } 12380 v.reset(OpAMD64MULSDmem) 12381 v.AuxInt = off 12382 v.Aux = sym 12383 v.AddArg(x) 12384 v.AddArg(ptr) 12385 v.AddArg(mem) 12386 return true 12387 } 12388 // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) 12389 // cond: canMergeLoad(v, l, x) && clobber(l) 12390 // result: (MULSDmem x [off] {sym} ptr mem) 12391 for { 12392 l := v.Args[0] 12393 if l.Op != OpAMD64MOVSDload { 12394 break 12395 } 12396 off := l.AuxInt 
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool {
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
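	// Editorial note, not part of the generated rules: the NEG/NOT matchers
	// above are plain constant folding, e.g. NOTQ (MOVQconst [5]) becomes
	// MOVQconst [^5] = MOVQconst [-6]; the NEGL case re-truncates through
	// int32 so the folded constant keeps 32-bit semantics.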
	return false
}
func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
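			// Editorial note, not part of the generated rules: the shift
			// counts do not line up for a byte rotate (d must equal 8-c on
			// a 1-byte value), so give up and let the next pattern try.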
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
	// cond:
	// result: (ROLL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
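			// Editorial note, not part of the generated rules: the right
			// shift must be by NEGQ of the very same y as the left shift;
			// otherwise the pair is not a rotate by y.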
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
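		// Editorial note, not part of the generated rules: from here the
		// rotate patterns repeat with NEGL/CMPLconst/ADDLconst/ANDLconst in
		// place of the Q-suffixed ops, i.e. the same shape when the shift
		// count was computed with 32-bit rather than 64-bit arithmetic.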
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
	// cond:
	// result: (ROLL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
	// cond:
	// result: (RORL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))))
	// cond:
	// result: (RORL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
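		// Editorial note, not part of the generated rules: every structural
		// check passed, so the masked 16-bit shift pair is exactly a rotate
		// of x by y&15 and can be replaced by a single ROLW.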
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
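			// Editorial note, not part of the generated rules: the SHLL must
			// shift the same x that the SHRB consumed; any mismatch means the
			// expression is not a byte rotate.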
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
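	// Annotation (not generated): the rules that follow are the mirror image
	// for right rotates: x shifted right by y&7, ORed with x shifted left by
	// 8-(y&7), is recognized as RORB. A sketch of the source idiom, under the
	// same precedence note as the rolb sketch earlier:
	//
	//	func rorb(x uint8, y uint) uint8 {
	//		return x>>(y&7) | x<<(8-y&7)
	//	}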
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
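	// Annotation (not generated): the remaining ORL rules perform load
	// combining: two adjacent narrow loads joined by a shift and an OR are
	// rewritten into one wider load. A sketch of the byte-at-a-time source
	// shape they target (hand-written little-endian decoding):
	//
	//	func load16(b []byte) uint16 {
	//		return uint16(b[0]) | uint16(b[1])<<8
	//	}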
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
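	// Annotation (not generated): the cond clauses above gate the merge. The
	// Uses == 1 checks ensure the partial loads and the shift have no other
	// consumers; mergePoint(b, x0, x1) finds a block where the combined load
	// can legally be placed (nil means no such block exists); and clobber
	// invalidates the now-dead values so they are cleaned up. clobber always
	// returns true, which is why it can be chained into the condition.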
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
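	// Annotation (not generated): MOVBloadidx1 computes its address as p+idx
	// with the two address operands interchangeable, so each merge rule below
	// is emitted once per operand order of each load and of the ORL itself.
	// That permutation blow-up, not new logic, accounts for most of the
	// near-identical cases that follow.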
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
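	// Annotation (not generated): the replacement machinery above builds the
	// wide load v0 in the mergePoint block and then turns the matched value
	// into a copy of it via v.reset(OpCopy); later passes eliminate the copy.
	// Rewrites therefore never move v itself between blocks; they only place
	// the new value where it is legal and redirect v to it.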
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
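	// Annotation (not generated): the same combining now applies one level
	// up: two adjacent 16-bit indexed loads, one shifted left by 16, become a
	// single 32-bit indexed load. A sketch of the corresponding source shape,
	// reusing the load16 helper from the earlier sketch:
	//
	//	func load32(b []byte) uint32 {
	//		return uint32(load16(b)) | uint32(load16(b[2:]))<<16
	//	}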
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
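// Annotation (not generated): the or:(ORL ...) patterns ahead handle chains
// of more than two byte loads ORed together. Each application peels off one
// adjacent pair, replacing it with a 16-bit load left inside the ORL chain
// (the y operand carries the rest), so repeated rewriting widens the loads
// step by step.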
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
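// rewriteValueAMD64_OpAMD64ORL_80 continues the ORL rules (annotation, not
// generated): it finishes the commutative permutations of the little-endian
// MOVBloadidx1 merge above, then begins the byte-reversed (big-endian) load
// merges.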
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
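	// NOTE (annotation, not generated): the rules from here on recognize the
	// byte-reversed variant, where the byte at the lower address i0 lands in
	// the high byte of the result. Two such loads merge into one MOVWload
	// followed by ROLWconst [8], which swaps the two bytes of a 16-bit value;
	// at the Go source level this is, roughly:
	//
	//	uint16(b[i])<<8 | uint16(b[i+1]) // e.g. binary.BigEndian.Uint16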
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
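	// NOTE (annotation, not generated): the 16-bit byte-reversed loads built
	// above can merge once more. Two adjacent ROLWconst [8] (MOVWload) values
	// (i1 == i0+2) combine into a single 32-bit load plus one BSWAPL byte
	// swap; at the Go source level, roughly:
	//
	//	uint32(b[i])<<24 | uint32(b[i+1])<<16 | uint32(b[i+2])<<8 | uint32(b[i+3])
	//	// e.g. binary.BigEndian.Uint32, compiled to BSWAPL (MOVLload ...)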
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
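// rewriteValueAMD64_OpAMD64ORL_100 continues the ORL rules (annotation, not
// generated): commutative permutations of the byte-reversed MOVBloadidx1
// merge, followed by the indexed variants of the BSWAPL 32-bit merge.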
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
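	// NOTE (annotation, not generated): the following rules are the indexed
	// (MOVWloadidx1/MOVLloadidx1) counterparts of the BSWAPL 32-bit merge
	// seen earlier, again repeated for every p/idx argument order the
	// matcher may encounter.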
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
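// rewriteValueAMD64_OpAMD64ORL_110 continues the ORL rules (annotation, not
// generated): the remaining p/idx permutations of the indexed BSWAPL merge,
// then byte-reversed partial merges inside larger OR trees (SHLLconst [j1]
// with j1 == j0-8).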
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
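	// Editorial note: the rules from here on fuse two adjacent byte loads
	// (MOVBloadidx1 at i0 and i0+1, the lower address carrying the higher
	// shift, j0 == j1+8) into one 16-bit load byte-swapped via
	// (ROLWconst [8] ...), still shifted by j1 and ORed with the remaining
	// term y. As above, the many variants differ only in commutative
	// operand order.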
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
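// Editorial note: rewriteValueAMD64_OpAMD64ORL_120 carries on with the
// remaining commutative permutations of the byte-merge rule above (or as
// the first ORL operand), then ends with the first load-folding rule.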
func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
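	// Editorial note: the last two ORL rules (this one and the mirror in
	// rewriteValueAMD64_OpAMD64ORL_130) fold a 32-bit load directly into
	// the OR as a memory operand, producing ORLmem, provided canMergeLoad
	// reports that the load is only used here and may be clobbered.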
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
	// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
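// Editorial sketch of the three ORLconst identities encoded below, in
// ordinary Go terms (illustration only, not generator output):
//
//	x | 0 == x    // ORLconst [0] x            => x
//	x | -1 == -1  // all ones in 32 bits       => MOVLconst [-1]
//	c | d         // OR of two constants folds => MOVLconst [c|d]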
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
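	// Editorial note: the remaining ORQ rules recognize variable 64-bit
	// rotates. The masked shift-and-or shape matched below appears to come
	// from rotate idioms written as shifts, along the lines of this
	// hypothetical sketch (illustration only, not generator output):
	//
	//	func rol64(x uint64, y uint) uint64 { // hypothetical example
	//		return x<<(y&63) | x>>((64-y)&63)
	//	}
	//
	// The SBBQcarrymask/CMPQconst subtree is the all-ones-or-zero mask that
	// earlier lowering inserts for the bounds-checked right shift; once the
	// whole shape is present it collapses to a single ROLQ.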
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
	// cond:
	// result: (ROLQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
	// cond:
	// result: (ROLQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
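// Editorial note: rewriteValueAMD64_OpAMD64ORQ_10 finishes the rotate
// recognition. The first two rules are the remaining ROLQ operand orders
// with a 32-bit (NEGL/CMPLconst) shift-count subtree; the rest match the
// mirrored shapes -- SHRQ as the primary shift -- which collapse to RORQ,
// the right rotate, in both the NEGQ and NEGL count forms.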
21240 } 21241 x := v_0.Args[0] 21242 y := v_0.Args[1] 21243 v_1 := v.Args[1] 21244 if v_1.Op != OpAMD64ANDQ { 21245 break 21246 } 21247 v_1_0 := v_1.Args[0] 21248 if v_1_0.Op != OpAMD64SHLQ { 21249 break 21250 } 21251 if x != v_1_0.Args[0] { 21252 break 21253 } 21254 v_1_0_1 := v_1_0.Args[1] 21255 if v_1_0_1.Op != OpAMD64NEGQ { 21256 break 21257 } 21258 if y != v_1_0_1.Args[0] { 21259 break 21260 } 21261 v_1_1 := v_1.Args[1] 21262 if v_1_1.Op != OpAMD64SBBQcarrymask { 21263 break 21264 } 21265 v_1_1_0 := v_1_1.Args[0] 21266 if v_1_1_0.Op != OpAMD64CMPQconst { 21267 break 21268 } 21269 if v_1_1_0.AuxInt != 64 { 21270 break 21271 } 21272 v_1_1_0_0 := v_1_1_0.Args[0] 21273 if v_1_1_0_0.Op != OpAMD64NEGQ { 21274 break 21275 } 21276 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 21277 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 21278 break 21279 } 21280 if v_1_1_0_0_0.AuxInt != -64 { 21281 break 21282 } 21283 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 21284 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 21285 break 21286 } 21287 if v_1_1_0_0_0_0.AuxInt != 63 { 21288 break 21289 } 21290 if y != v_1_1_0_0_0_0.Args[0] { 21291 break 21292 } 21293 v.reset(OpAMD64RORQ) 21294 v.AddArg(x) 21295 v.AddArg(y) 21296 return true 21297 } 21298 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y)))) 21299 // cond: 21300 // result: (RORQ x y) 21301 for { 21302 v_0 := v.Args[0] 21303 if v_0.Op != OpAMD64SHRQ { 21304 break 21305 } 21306 x := v_0.Args[0] 21307 y := v_0.Args[1] 21308 v_1 := v.Args[1] 21309 if v_1.Op != OpAMD64ANDQ { 21310 break 21311 } 21312 v_1_0 := v_1.Args[0] 21313 if v_1_0.Op != OpAMD64SBBQcarrymask { 21314 break 21315 } 21316 v_1_0_0 := v_1_0.Args[0] 21317 if v_1_0_0.Op != OpAMD64CMPQconst { 21318 break 21319 } 21320 if v_1_0_0.AuxInt != 64 { 21321 break 21322 } 21323 v_1_0_0_0 := v_1_0_0.Args[0] 21324 if v_1_0_0_0.Op != OpAMD64NEGQ { 21325 break 21326 } 21327 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 21328 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 21329 break 21330 } 21331 if v_1_0_0_0_0.AuxInt != -64 { 21332 break 21333 } 21334 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 21335 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 21336 break 21337 } 21338 if v_1_0_0_0_0_0.AuxInt != 63 { 21339 break 21340 } 21341 if y != v_1_0_0_0_0_0.Args[0] { 21342 break 21343 } 21344 v_1_1 := v_1.Args[1] 21345 if v_1_1.Op != OpAMD64SHLQ { 21346 break 21347 } 21348 if x != v_1_1.Args[0] { 21349 break 21350 } 21351 v_1_1_1 := v_1_1.Args[1] 21352 if v_1_1_1.Op != OpAMD64NEGQ { 21353 break 21354 } 21355 if y != v_1_1_1.Args[0] { 21356 break 21357 } 21358 v.reset(OpAMD64RORQ) 21359 v.AddArg(x) 21360 v.AddArg(y) 21361 return true 21362 } 21363 // match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y)) 21364 // cond: 21365 // result: (RORQ x y) 21366 for { 21367 v_0 := v.Args[0] 21368 if v_0.Op != OpAMD64ANDQ { 21369 break 21370 } 21371 v_0_0 := v_0.Args[0] 21372 if v_0_0.Op != OpAMD64SHLQ { 21373 break 21374 } 21375 x := v_0_0.Args[0] 21376 v_0_0_1 := v_0_0.Args[1] 21377 if v_0_0_1.Op != OpAMD64NEGQ { 21378 break 21379 } 21380 y := v_0_0_1.Args[0] 21381 v_0_1 := v_0.Args[1] 21382 if v_0_1.Op != OpAMD64SBBQcarrymask { 21383 break 21384 } 21385 v_0_1_0 := v_0_1.Args[0] 21386 if v_0_1_0.Op != OpAMD64CMPQconst { 21387 break 21388 } 21389 if v_0_1_0.AuxInt != 64 { 21390 break 21391 } 21392 v_0_1_0_0 := v_0_1_0.Args[0] 21393 if v_0_1_0_0.Op != OpAMD64NEGQ { 21394 break 21395 } 21396 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 21397 if 
v_0_1_0_0_0.Op != OpAMD64ADDQconst { 21398 break 21399 } 21400 if v_0_1_0_0_0.AuxInt != -64 { 21401 break 21402 } 21403 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 21404 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 21405 break 21406 } 21407 if v_0_1_0_0_0_0.AuxInt != 63 { 21408 break 21409 } 21410 if y != v_0_1_0_0_0_0.Args[0] { 21411 break 21412 } 21413 v_1 := v.Args[1] 21414 if v_1.Op != OpAMD64SHRQ { 21415 break 21416 } 21417 if x != v_1.Args[0] { 21418 break 21419 } 21420 if y != v_1.Args[1] { 21421 break 21422 } 21423 v.reset(OpAMD64RORQ) 21424 v.AddArg(x) 21425 v.AddArg(y) 21426 return true 21427 } 21428 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y)) 21429 // cond: 21430 // result: (RORQ x y) 21431 for { 21432 v_0 := v.Args[0] 21433 if v_0.Op != OpAMD64ANDQ { 21434 break 21435 } 21436 v_0_0 := v_0.Args[0] 21437 if v_0_0.Op != OpAMD64SBBQcarrymask { 21438 break 21439 } 21440 v_0_0_0 := v_0_0.Args[0] 21441 if v_0_0_0.Op != OpAMD64CMPQconst { 21442 break 21443 } 21444 if v_0_0_0.AuxInt != 64 { 21445 break 21446 } 21447 v_0_0_0_0 := v_0_0_0.Args[0] 21448 if v_0_0_0_0.Op != OpAMD64NEGQ { 21449 break 21450 } 21451 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 21452 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 21453 break 21454 } 21455 if v_0_0_0_0_0.AuxInt != -64 { 21456 break 21457 } 21458 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 21459 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 21460 break 21461 } 21462 if v_0_0_0_0_0_0.AuxInt != 63 { 21463 break 21464 } 21465 y := v_0_0_0_0_0_0.Args[0] 21466 v_0_1 := v_0.Args[1] 21467 if v_0_1.Op != OpAMD64SHLQ { 21468 break 21469 } 21470 x := v_0_1.Args[0] 21471 v_0_1_1 := v_0_1.Args[1] 21472 if v_0_1_1.Op != OpAMD64NEGQ { 21473 break 21474 } 21475 if y != v_0_1_1.Args[0] { 21476 break 21477 } 21478 v_1 := v.Args[1] 21479 if v_1.Op != OpAMD64SHRQ { 21480 break 21481 } 21482 if x != v_1.Args[0] { 21483 break 21484 } 21485 if y != v_1.Args[1] { 21486 break 21487 } 21488 v.reset(OpAMD64RORQ) 21489 v.AddArg(x) 21490 v.AddArg(y) 21491 return true 21492 } 21493 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 21494 // cond: 21495 // result: (RORQ x y) 21496 for { 21497 v_0 := v.Args[0] 21498 if v_0.Op != OpAMD64SHRQ { 21499 break 21500 } 21501 x := v_0.Args[0] 21502 y := v_0.Args[1] 21503 v_1 := v.Args[1] 21504 if v_1.Op != OpAMD64ANDQ { 21505 break 21506 } 21507 v_1_0 := v_1.Args[0] 21508 if v_1_0.Op != OpAMD64SHLQ { 21509 break 21510 } 21511 if x != v_1_0.Args[0] { 21512 break 21513 } 21514 v_1_0_1 := v_1_0.Args[1] 21515 if v_1_0_1.Op != OpAMD64NEGL { 21516 break 21517 } 21518 if y != v_1_0_1.Args[0] { 21519 break 21520 } 21521 v_1_1 := v_1.Args[1] 21522 if v_1_1.Op != OpAMD64SBBQcarrymask { 21523 break 21524 } 21525 v_1_1_0 := v_1_1.Args[0] 21526 if v_1_1_0.Op != OpAMD64CMPLconst { 21527 break 21528 } 21529 if v_1_1_0.AuxInt != 64 { 21530 break 21531 } 21532 v_1_1_0_0 := v_1_1_0.Args[0] 21533 if v_1_1_0_0.Op != OpAMD64NEGL { 21534 break 21535 } 21536 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 21537 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 21538 break 21539 } 21540 if v_1_1_0_0_0.AuxInt != -64 { 21541 break 21542 } 21543 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 21544 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 21545 break 21546 } 21547 if v_1_1_0_0_0_0.AuxInt != 63 { 21548 break 21549 } 21550 if y != v_1_1_0_0_0_0.Args[0] { 21551 break 21552 } 21553 v.reset(OpAMD64RORQ) 21554 v.AddArg(x) 21555 v.AddArg(y) 21556 return true 21557 } 21558 // match: 
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
	// cond:
	// result: (RORQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
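	// Editorial note: the rules from here on combine adjacent narrow loads
	// that are OR'd together into a single wider load. A sketch of the
	// source idiom they target (roughly what encoding/binary's
	// LittleEndian.Uint16 does; the helper is illustrative):
	//
	//	func u16(b []byte) uint16 {
	//		return uint16(b[0]) | uint16(b[1])<<8
	//	}
	//
	// The cond line is the safety net: each partial load and shift must
	// have exactly one use, mergePoint must find a block where both loads
	// are available, and clobber marks the replaced values as dead.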
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
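	// Editorial note: the s0/s1/or rules below handle pairs that sit inside
	// a larger OR tree rather than at its root: two shifted byte loads are
	// fused into one shifted word load while the unrelated remainder y is
	// kept. A sketch of a source shape that exercises them (a 4-byte
	// little-endian read, merged pairwise; the helper is illustrative):
	//
	//	func u32(b []byte) uint32 {
	//		return uint32(b[0]) | uint32(b[1])<<8 |
	//			uint32(b[2])<<16 | uint32(b[3])<<24
	//	}
	//
	// j0 % 16 == 0 keeps the two bytes in an aligned 16-bit lane of the
	// result, and j1 == j0+8 pins them as neighbors.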
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
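	// Editorial note: ORQ is commutative, and so is the nested ORQ carrying
	// y, but this matcher is purely syntactic, so each logical rule above
	// appears four times: s1 and or swapped at the root, times y on either
	// side of the inner ORQ. All four variants rewrite to the identical
	// result.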
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
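	// Editorial note: the rules below repeat the same merges for the
	// indexed-address forms (MOVBloadidx1 and friends, i.e. loads from
	// p+idx). A sketch of a source shape that plausibly ends up indexed
	// (the helper is illustrative):
	//
	//	func u16at(b []byte, i int) uint16 {
	//		return uint16(b[i]) | uint16(b[i+1])<<8
	//	}
	//
	// Because ptr and index play symmetric roles in an idx1 address, each
	// rule also has to accept both (p idx) and (idx p) argument orders.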
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
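// Editorial note: rulegen splits the ORQ rewrites across numbered helper
// functions (_0, _10, _20, ...) of roughly ten rules each, apparently so
// that no single generated function grows unmanageably large; the per-op
// dispatcher tries the pieces in order and stops at the first that fires.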
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
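	// Editorial note: this is the top of the merge ladder: two adjacent
	// 32-bit indexed loads whose halves line up become one MOVQloadidx1,
	// so a little-endian uint64 assembled from narrower reads should end
	// up as a single 8-byte load, with the 16- and 32-bit steps above
	// feeding this rule when the value was originally built byte by byte.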
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		sh := v.Args[0]
OpAMD64SHLQconst { 23729 break 23730 } 23731 if sh.AuxInt != 32 { 23732 break 23733 } 23734 x1 := sh.Args[0] 23735 if x1.Op != OpAMD64MOVLloadidx1 { 23736 break 23737 } 23738 i1 := x1.AuxInt 23739 s := x1.Aux 23740 idx := x1.Args[0] 23741 p := x1.Args[1] 23742 mem := x1.Args[2] 23743 x0 := v.Args[1] 23744 if x0.Op != OpAMD64MOVLloadidx1 { 23745 break 23746 } 23747 i0 := x0.AuxInt 23748 if x0.Aux != s { 23749 break 23750 } 23751 if idx != x0.Args[0] { 23752 break 23753 } 23754 if p != x0.Args[1] { 23755 break 23756 } 23757 if mem != x0.Args[2] { 23758 break 23759 } 23760 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 23761 break 23762 } 23763 b = mergePoint(b, x0, x1) 23764 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) 23765 v.reset(OpCopy) 23766 v.AddArg(v0) 23767 v0.AuxInt = i0 23768 v0.Aux = s 23769 v0.AddArg(p) 23770 v0.AddArg(idx) 23771 v0.AddArg(mem) 23772 return true 23773 } 23774 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 23775 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23776 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 23777 for { 23778 s1 := v.Args[0] 23779 if s1.Op != OpAMD64SHLQconst { 23780 break 23781 } 23782 j1 := s1.AuxInt 23783 x1 := s1.Args[0] 23784 if x1.Op != OpAMD64MOVBloadidx1 { 23785 break 23786 } 23787 i1 := x1.AuxInt 23788 s := x1.Aux 23789 p := x1.Args[0] 23790 idx := x1.Args[1] 23791 mem := x1.Args[2] 23792 or := v.Args[1] 23793 if or.Op != OpAMD64ORQ { 23794 break 23795 } 23796 s0 := or.Args[0] 23797 if s0.Op != OpAMD64SHLQconst { 23798 break 23799 } 23800 j0 := s0.AuxInt 23801 x0 := s0.Args[0] 23802 if x0.Op != OpAMD64MOVBloadidx1 { 23803 break 23804 } 23805 i0 := x0.AuxInt 23806 if x0.Aux != s { 23807 break 23808 } 23809 if p != x0.Args[0] { 23810 break 23811 } 23812 if idx != x0.Args[1] { 23813 break 23814 } 23815 if mem != x0.Args[2] { 23816 break 23817 } 23818 y := or.Args[1] 23819 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23820 break 23821 } 23822 b = mergePoint(b, x0, x1) 23823 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 23824 v.reset(OpCopy) 23825 v.AddArg(v0) 23826 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 23827 v1.AuxInt = j0 23828 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 23829 v2.AuxInt = i0 23830 v2.Aux = s 23831 v2.AddArg(p) 23832 v2.AddArg(idx) 23833 v2.AddArg(mem) 23834 v1.AddArg(v2) 23835 v0.AddArg(v1) 23836 v0.AddArg(y) 23837 return true 23838 } 23839 return false 23840 } 23841 func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool { 23842 b := v.Block 23843 _ = b 23844 types := &b.Func.Config.Types 23845 _ = types 23846 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 23847 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && 
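// rewriteValueAMD64_OpAMD64ORQ_60 continues the ORQ load-combining rules: each
// case matches a pair of adjacent byte loads (MOVBloadidx1) joined through
// SHLQconst shifts and an inner ORQ, and replaces the pair with a single
// MOVWloadidx1. The near-identical cases below cover the commuted operand
// orders (p/idx swapped, y on either side) that the rule generator expands
// explicitly rather than matching commutatively.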
func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
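// rewriteValueAMD64_OpAMD64ORQ_70 finishes the commuted MOVBloadidx1 pair
// rules and then starts the analogous word-sized patterns: two adjacent
// MOVWloadidx1 loads combined through SHLQconst/ORQ become one MOVLloadidx1,
// with the conditions scaled accordingly (i1 == i0+2, j1 == j0+16, j0 % 32 == 0).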
func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
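// rewriteValueAMD64_OpAMD64ORQ_80 covers the remaining commuted forms of the
// MOVWloadidx1-pair rules: the shifted half may sit on either side of the
// outer ORQ, y may be the first or second operand of the inner ORQ, and p/idx
// may appear in either order in each indexed load.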
@mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 25228 for { 25229 s1 := v.Args[0] 25230 if s1.Op != OpAMD64SHLQconst { 25231 break 25232 } 25233 j1 := s1.AuxInt 25234 x1 := s1.Args[0] 25235 if x1.Op != OpAMD64MOVWloadidx1 { 25236 break 25237 } 25238 i1 := x1.AuxInt 25239 s := x1.Aux 25240 p := x1.Args[0] 25241 idx := x1.Args[1] 25242 mem := x1.Args[2] 25243 or := v.Args[1] 25244 if or.Op != OpAMD64ORQ { 25245 break 25246 } 25247 y := or.Args[0] 25248 s0 := or.Args[1] 25249 if s0.Op != OpAMD64SHLQconst { 25250 break 25251 } 25252 j0 := s0.AuxInt 25253 x0 := s0.Args[0] 25254 if x0.Op != OpAMD64MOVWloadidx1 { 25255 break 25256 } 25257 i0 := x0.AuxInt 25258 if x0.Aux != s { 25259 break 25260 } 25261 if idx != x0.Args[0] { 25262 break 25263 } 25264 if p != x0.Args[1] { 25265 break 25266 } 25267 if mem != x0.Args[2] { 25268 break 25269 } 25270 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25271 break 25272 } 25273 b = mergePoint(b, x0, x1) 25274 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25275 v.reset(OpCopy) 25276 v.AddArg(v0) 25277 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25278 v1.AuxInt = j0 25279 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 25280 v2.AuxInt = i0 25281 v2.Aux = s 25282 v2.AddArg(p) 25283 v2.AddArg(idx) 25284 v2.AddArg(mem) 25285 v1.AddArg(v2) 25286 v0.AddArg(v1) 25287 v0.AddArg(y) 25288 return true 25289 } 25290 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 25291 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25292 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 25293 for { 25294 s1 := v.Args[0] 25295 if s1.Op != OpAMD64SHLQconst { 25296 break 25297 } 25298 j1 := s1.AuxInt 25299 x1 := s1.Args[0] 25300 if x1.Op != OpAMD64MOVWloadidx1 { 25301 break 25302 } 25303 i1 := x1.AuxInt 25304 s := x1.Aux 25305 idx := x1.Args[0] 25306 p := x1.Args[1] 25307 mem := x1.Args[2] 25308 or := v.Args[1] 25309 if or.Op != OpAMD64ORQ { 25310 break 25311 } 25312 y := or.Args[0] 25313 s0 := or.Args[1] 25314 if s0.Op != OpAMD64SHLQconst { 25315 break 25316 } 25317 j0 := s0.AuxInt 25318 x0 := s0.Args[0] 25319 if x0.Op != OpAMD64MOVWloadidx1 { 25320 break 25321 } 25322 i0 := x0.AuxInt 25323 if x0.Aux != s { 25324 break 25325 } 25326 if idx != x0.Args[0] { 25327 break 25328 } 25329 if p != x0.Args[1] { 25330 break 25331 } 25332 if mem != x0.Args[2] { 25333 break 25334 } 25335 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25336 break 25337 } 25338 b = mergePoint(b, x0, x1) 25339 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25340 v.reset(OpCopy) 25341 v.AddArg(v0) 25342 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25343 v1.AuxInt = j0 25344 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 25345 v2.AuxInt = i0 25346 v2.Aux = s 25347 v2.AddArg(p) 25348 v2.AddArg(idx) 25349 v2.AddArg(mem) 25350 v1.AddArg(v2) 
25351 v0.AddArg(v1) 25352 v0.AddArg(y) 25353 return true 25354 } 25355 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 25356 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25357 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 25358 for { 25359 or := v.Args[0] 25360 if or.Op != OpAMD64ORQ { 25361 break 25362 } 25363 s0 := or.Args[0] 25364 if s0.Op != OpAMD64SHLQconst { 25365 break 25366 } 25367 j0 := s0.AuxInt 25368 x0 := s0.Args[0] 25369 if x0.Op != OpAMD64MOVWloadidx1 { 25370 break 25371 } 25372 i0 := x0.AuxInt 25373 s := x0.Aux 25374 p := x0.Args[0] 25375 idx := x0.Args[1] 25376 mem := x0.Args[2] 25377 y := or.Args[1] 25378 s1 := v.Args[1] 25379 if s1.Op != OpAMD64SHLQconst { 25380 break 25381 } 25382 j1 := s1.AuxInt 25383 x1 := s1.Args[0] 25384 if x1.Op != OpAMD64MOVWloadidx1 { 25385 break 25386 } 25387 i1 := x1.AuxInt 25388 if x1.Aux != s { 25389 break 25390 } 25391 if p != x1.Args[0] { 25392 break 25393 } 25394 if idx != x1.Args[1] { 25395 break 25396 } 25397 if mem != x1.Args[2] { 25398 break 25399 } 25400 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25401 break 25402 } 25403 b = mergePoint(b, x0, x1) 25404 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25405 v.reset(OpCopy) 25406 v.AddArg(v0) 25407 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25408 v1.AuxInt = j0 25409 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 25410 v2.AuxInt = i0 25411 v2.Aux = s 25412 v2.AddArg(p) 25413 v2.AddArg(idx) 25414 v2.AddArg(mem) 25415 v1.AddArg(v2) 25416 v0.AddArg(v1) 25417 v0.AddArg(y) 25418 return true 25419 } 25420 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 25421 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25422 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 25423 for { 25424 or := v.Args[0] 25425 if or.Op != OpAMD64ORQ { 25426 break 25427 } 25428 s0 := or.Args[0] 25429 if s0.Op != OpAMD64SHLQconst { 25430 break 25431 } 25432 j0 := s0.AuxInt 25433 x0 := s0.Args[0] 25434 if x0.Op != OpAMD64MOVWloadidx1 { 25435 break 25436 } 25437 i0 := x0.AuxInt 25438 s := x0.Aux 25439 idx := x0.Args[0] 25440 p := x0.Args[1] 25441 mem := x0.Args[2] 25442 y := or.Args[1] 25443 s1 := v.Args[1] 25444 if s1.Op != OpAMD64SHLQconst { 25445 break 25446 } 25447 j1 := s1.AuxInt 25448 x1 := s1.Args[0] 25449 if x1.Op != OpAMD64MOVWloadidx1 { 25450 break 25451 } 25452 i1 := x1.AuxInt 25453 if x1.Aux != s { 25454 break 25455 } 25456 if p != x1.Args[0] { 25457 break 25458 } 25459 if idx != x1.Args[1] { 25460 break 25461 } 25462 if mem != x1.Args[2] { 25463 break 25464 } 25465 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25466 break 25467 } 25468 b = mergePoint(b, x0, x1) 25469 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25470 v.reset(OpCopy) 25471 v.AddArg(v0) 25472 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25473 v1.AuxInt = j0 25474 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 25475 v2.AuxInt = i0 25476 v2.Aux = s 25477 v2.AddArg(p) 25478 v2.AddArg(idx) 25479 v2.AddArg(mem) 25480 v1.AddArg(v2) 25481 v0.AddArg(v1) 25482 v0.AddArg(y) 25483 return true 25484 } 25485 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 25486 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25487 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 25488 for { 25489 or := v.Args[0] 25490 if or.Op != OpAMD64ORQ { 25491 break 25492 } 25493 y := or.Args[0] 25494 s0 := or.Args[1] 25495 if s0.Op != OpAMD64SHLQconst { 25496 break 25497 } 25498 j0 := s0.AuxInt 25499 x0 := s0.Args[0] 25500 if x0.Op != OpAMD64MOVWloadidx1 { 25501 break 25502 } 25503 i0 := x0.AuxInt 25504 s := x0.Aux 25505 p := x0.Args[0] 25506 idx := x0.Args[1] 25507 mem := x0.Args[2] 25508 s1 := v.Args[1] 25509 if s1.Op != OpAMD64SHLQconst { 25510 break 25511 } 25512 j1 := s1.AuxInt 25513 x1 := s1.Args[0] 25514 if x1.Op != OpAMD64MOVWloadidx1 { 25515 break 25516 } 25517 i1 := x1.AuxInt 25518 if x1.Aux != s { 25519 break 25520 } 25521 if p != x1.Args[0] { 25522 break 25523 } 25524 if idx != x1.Args[1] { 25525 break 25526 } 25527 if mem != x1.Args[2] { 25528 break 25529 } 25530 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25531 break 25532 } 25533 b = mergePoint(b, x0, x1) 25534 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25535 v.reset(OpCopy) 25536 v.AddArg(v0) 25537 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25538 v1.AuxInt = j0 25539 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 25540 v2.AuxInt = i0 25541 v2.Aux = s 25542 v2.AddArg(p) 25543 v2.AddArg(idx) 25544 v2.AddArg(mem) 25545 v1.AddArg(v2) 25546 v0.AddArg(v1) 25547 v0.AddArg(y) 25548 return true 25549 } 25550 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 25551 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25552 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 25553 for { 25554 or := v.Args[0] 25555 if or.Op != OpAMD64ORQ { 25556 break 25557 } 25558 y := or.Args[0] 25559 s0 := or.Args[1] 25560 if s0.Op != OpAMD64SHLQconst { 25561 break 25562 } 25563 j0 := s0.AuxInt 25564 x0 := s0.Args[0] 25565 if x0.Op != OpAMD64MOVWloadidx1 { 25566 break 25567 } 25568 i0 := x0.AuxInt 25569 s := x0.Aux 25570 idx := x0.Args[0] 25571 p := x0.Args[1] 25572 mem := x0.Args[2] 25573 s1 := v.Args[1] 25574 if s1.Op != OpAMD64SHLQconst { 25575 break 25576 } 25577 j1 
:= s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
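
// The ORQ rules in this stretch perform little-endian load combining: two
// adjacent narrow loads, OR'd together at shift offsets that differ by the
// load width, are replaced by a single wider load. The sketch below is
// illustrative only -- it is not part of the generated rewriter, and the
// helper names are invented -- but it states the identity the MOVWloadidx1
// rules above rely on; the rules additionally require j0%32 == 0 and that
// every intermediate value has a single use.
func sketchLEMergeWW(mem []byte, i0 int, j0 uint) bool {
	le16 := func(i int) uint64 { return uint64(mem[i]) | uint64(mem[i+1])<<8 }
	le32 := func(i int) uint64 {
		return uint64(mem[i]) | uint64(mem[i+1])<<8 | uint64(mem[i+2])<<16 | uint64(mem[i+3])<<24
	}
	// (ORQ (SHLQconst [j0] (MOVWload [i0] ...)) (SHLQconst [j0+16] (MOVWload [i0+2] ...)))
	chain := le16(i0)<<j0 | le16(i0+2)<<(j0+16)
	// == (SHLQconst [j0] (MOVLload [i0] ...))
	return chain == le32(i0)<<j0
}
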
func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
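
	// ROLWconst [8] rotates a 16-bit value by 8 bits, i.e. swaps its two
	// bytes. The MOVBload pair rules here therefore recognize a big-endian
	// 16-bit read assembled byte by byte and turn it into one little-endian
	// MOVWload followed by a byte swap. A hedged, illustrative sketch of the
	// identity (not used by the compiler):
	//
	//	func sketchBSwap16Merge(mem []byte, i0 int) bool {
	//		// (ORQ (MOVBload [i0+1] ...) (SHLQconst [8] (MOVBload [i0] ...)))
	//		chain := uint16(mem[i0+1]) | uint16(mem[i0])<<8
	//		// == (ROLWconst [8] (MOVWload [i0] ...))
	//		le := uint16(mem[i0]) | uint16(mem[i0+1])<<8
	//		return chain == (le>>8 | le<<8)
	//	}
	//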
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
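
	// Two byte-swapped 16-bit loads at i0 and i0+2, with the lower-address
	// word placed in the upper lane, form one byte-swapped 32-bit load.
	// Illustrative sketch only (hypothetical helpers, not generated code):
	//
	//	func sketchBSwap32Merge(mem []byte, i0 int) bool {
	//		be16 := func(i int) uint32 { return uint32(mem[i])<<8 | uint32(mem[i+1]) } // ROLWconst [8] of a MOVWload
	//		bswap32 := func(x uint32) uint32 { return x>>24 | (x>>8)&0xff00 | (x<<8)&0xff0000 | x<<24 }
	//		// (ORQ (ROLWconst [8] (MOVWload [i0+2] ...)) (SHLQconst [16] (ROLWconst [8] (MOVWload [i0] ...))))
	//		chain := be16(i0+2) | be16(i0)<<16
	//		// == (BSWAPL (MOVLload [i0] ...))
	//		le32 := uint32(mem[i0]) | uint32(mem[i0+1])<<8 | uint32(mem[i0+2])<<16 | uint32(mem[i0+3])<<24
	//		return chain == bswap32(le32)
	//	}
	//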
	// match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
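
	// The same pattern one size up: two BSWAPL'd 32-bit loads merge into one
	// BSWAPQ'd 64-bit load. Hedged sketch only (helper names invented, not
	// part of the generated rules):
	//
	//	func sketchBSwap64Merge(mem []byte, i0 int) bool {
	//		le32 := func(i int) uint32 {
	//			return uint32(mem[i]) | uint32(mem[i+1])<<8 | uint32(mem[i+2])<<16 | uint32(mem[i+3])<<24
	//		}
	//		bswap32 := func(x uint32) uint32 { return x>>24 | (x>>8)&0xff00 | (x<<8)&0xff0000 | x<<24 }
	//		bswap64 := func(x uint64) uint64 {
	//			return uint64(bswap32(uint32(x)))<<32 | uint64(bswap32(uint32(x>>32)))
	//		}
	//		// (ORQ (BSWAPL (MOVLload [i0+4] ...)) (SHLQconst [32] (BSWAPL (MOVLload [i0] ...))))
	//		chain := uint64(bswap32(le32(i0+4))) | uint64(bswap32(le32(i0)))<<32
	//		// == (BSWAPQ (MOVQload [i0] ...))
	//		le64 := uint64(le32(i0)) | uint64(le32(i0+4))<<32
	//		return chain == bswap64(le64)
	//	}
	//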
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
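
	// These chain rules differ from the pair rules above: the two shifted
	// loads sit inside a longer ORQ chain with an arbitrary remainder y, and
	// the shifts descend (j1 == j0-8), so only a partial big-endian merge
	// into the chain is performed. Hedged sketch of the identity, with y
	// standing for the rest of the chain (names hypothetical):
	//
	//	func sketchBEChainMerge(mem []byte, i0 int, j1 uint, y uint64) bool {
	//		// (ORQ (SHLQconst [j1+8] (MOVBload [i0] ...)) (ORQ (SHLQconst [j1] (MOVBload [i0+1] ...)) y))
	//		chain := uint64(mem[i0])<<(j1+8) | uint64(mem[i0+1])<<j1 | y
	//		// == (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] ...))) y)
	//		le := uint16(mem[i0]) | uint16(mem[i0+1])<<8
	//		be := le>>8 | le<<8
	//		return chain == (uint64(be)<<j1 | y)
	//	}
	//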
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
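		// Note on the four near-identical MOVBloadidx1 cases in this run:
		// (MOVBloadidx1 {s} p idx mem) addresses p+idx, and addition
		// commutes, but the matcher compares operands positionally, so the
		// generator emits one case per p/idx order on each load. A trivial,
		// purely illustrative sketch of the invariant being exploited
		// (hypothetical function, not part of the rewriter):
		//
		//	func sketchIdx1Commutes(p, idx uintptr) bool {
		//		return p+idx == idx+p // both operand orders address the same byte
		//	}
		//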
types.UInt16) 26987 v1.AuxInt = i0 26988 v1.Aux = s 26989 v1.AddArg(p) 26990 v1.AddArg(idx) 26991 v1.AddArg(mem) 26992 v0.AddArg(v1) 26993 return true 26994 } 26995 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 26996 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26997 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 26998 for { 26999 sh := v.Args[0] 27000 if sh.Op != OpAMD64SHLQconst { 27001 break 27002 } 27003 if sh.AuxInt != 8 { 27004 break 27005 } 27006 x0 := sh.Args[0] 27007 if x0.Op != OpAMD64MOVBloadidx1 { 27008 break 27009 } 27010 i0 := x0.AuxInt 27011 s := x0.Aux 27012 p := x0.Args[0] 27013 idx := x0.Args[1] 27014 mem := x0.Args[2] 27015 x1 := v.Args[1] 27016 if x1.Op != OpAMD64MOVBloadidx1 { 27017 break 27018 } 27019 i1 := x1.AuxInt 27020 if x1.Aux != s { 27021 break 27022 } 27023 if p != x1.Args[0] { 27024 break 27025 } 27026 if idx != x1.Args[1] { 27027 break 27028 } 27029 if mem != x1.Args[2] { 27030 break 27031 } 27032 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27033 break 27034 } 27035 b = mergePoint(b, x0, x1) 27036 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27037 v.reset(OpCopy) 27038 v.AddArg(v0) 27039 v0.AuxInt = 8 27040 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 27041 v1.AuxInt = i0 27042 v1.Aux = s 27043 v1.AddArg(p) 27044 v1.AddArg(idx) 27045 v1.AddArg(mem) 27046 v0.AddArg(v1) 27047 return true 27048 } 27049 return false 27050 } 27051 func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool { 27052 b := v.Block 27053 _ = b 27054 types := &b.Func.Config.Types 27055 _ = types 27056 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 27057 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27058 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 27059 for { 27060 sh := v.Args[0] 27061 if sh.Op != OpAMD64SHLQconst { 27062 break 27063 } 27064 if sh.AuxInt != 8 { 27065 break 27066 } 27067 x0 := sh.Args[0] 27068 if x0.Op != OpAMD64MOVBloadidx1 { 27069 break 27070 } 27071 i0 := x0.AuxInt 27072 s := x0.Aux 27073 idx := x0.Args[0] 27074 p := x0.Args[1] 27075 mem := x0.Args[2] 27076 x1 := v.Args[1] 27077 if x1.Op != OpAMD64MOVBloadidx1 { 27078 break 27079 } 27080 i1 := x1.AuxInt 27081 if x1.Aux != s { 27082 break 27083 } 27084 if p != x1.Args[0] { 27085 break 27086 } 27087 if idx != x1.Args[1] { 27088 break 27089 } 27090 if mem != x1.Args[2] { 27091 break 27092 } 27093 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27094 break 27095 } 27096 b = mergePoint(b, x0, x1) 27097 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27098 v.reset(OpCopy) 27099 v.AddArg(v0) 27100 v0.AuxInt = 8 27101 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 27102 v1.AuxInt = i0 27103 v1.Aux = s 27104 v1.AddArg(p) 27105 v1.AddArg(idx) 27106 v1.AddArg(mem) 27107 v0.AddArg(v1) 27108 return true 27109 } 27110 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 27111 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27112 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 27113 for { 27114 sh := v.Args[0] 27115 if sh.Op != OpAMD64SHLQconst { 27116 break 27117 } 27118 if sh.AuxInt != 8 { 27119 break 27120 } 27121 x0 := sh.Args[0] 27122 if x0.Op != OpAMD64MOVBloadidx1 { 27123 break 27124 } 27125 i0 := x0.AuxInt 27126 s := x0.Aux 27127 p := x0.Args[0] 27128 idx := x0.Args[1] 27129 mem := x0.Args[2] 27130 x1 := v.Args[1] 27131 if x1.Op != OpAMD64MOVBloadidx1 { 27132 break 27133 } 27134 i1 := x1.AuxInt 27135 if x1.Aux != s { 27136 break 27137 } 27138 if idx != x1.Args[0] { 27139 break 27140 } 27141 if p != x1.Args[1] { 27142 break 27143 } 27144 if mem != x1.Args[2] { 27145 break 27146 } 27147 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27148 break 27149 } 27150 b = mergePoint(b, x0, x1) 27151 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27152 v.reset(OpCopy) 27153 v.AddArg(v0) 27154 v0.AuxInt = 8 27155 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 27156 v1.AuxInt = i0 27157 v1.Aux = s 27158 v1.AddArg(p) 27159 v1.AddArg(idx) 27160 v1.AddArg(mem) 27161 v0.AddArg(v1) 27162 return true 27163 } 27164 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 27165 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27166 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 27167 for { 27168 sh := v.Args[0] 27169 if sh.Op != OpAMD64SHLQconst { 27170 break 27171 } 27172 if sh.AuxInt != 8 { 27173 break 27174 } 27175 x0 := sh.Args[0] 27176 if x0.Op != OpAMD64MOVBloadidx1 { 27177 break 27178 } 27179 i0 := x0.AuxInt 27180 s := x0.Aux 27181 idx := x0.Args[0] 27182 p := x0.Args[1] 27183 mem := x0.Args[2] 27184 x1 := v.Args[1] 27185 if x1.Op != OpAMD64MOVBloadidx1 { 27186 break 27187 } 27188 i1 := x1.AuxInt 27189 if x1.Aux != s { 27190 break 27191 } 27192 if idx != x1.Args[0] { 27193 break 27194 } 27195 if p != x1.Args[1] { 27196 break 27197 } 27198 if mem != x1.Args[2] { 27199 break 27200 } 27201 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27202 break 27203 } 27204 b = mergePoint(b, x0, x1) 27205 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27206 v.reset(OpCopy) 27207 v.AddArg(v0) 27208 v0.AuxInt = 8 27209 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) 27210 v1.AuxInt = i0 27211 v1.Aux = s 27212 v1.AddArg(p) 27213 v1.AddArg(idx) 27214 v1.AddArg(mem) 27215 v0.AddArg(v1) 27216 return true 27217 } 27218 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 27219 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27220 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 27221 for { 27222 r1 := v.Args[0] 27223 if r1.Op != OpAMD64ROLWconst { 27224 break 27225 } 27226 if r1.AuxInt != 8 { 27227 break 27228 } 27229 x1 := r1.Args[0] 27230 if x1.Op != OpAMD64MOVWloadidx1 { 27231 break 27232 } 27233 i1 := x1.AuxInt 27234 s := x1.Aux 
27235 p := x1.Args[0] 27236 idx := x1.Args[1] 27237 mem := x1.Args[2] 27238 sh := v.Args[1] 27239 if sh.Op != OpAMD64SHLQconst { 27240 break 27241 } 27242 if sh.AuxInt != 16 { 27243 break 27244 } 27245 r0 := sh.Args[0] 27246 if r0.Op != OpAMD64ROLWconst { 27247 break 27248 } 27249 if r0.AuxInt != 8 { 27250 break 27251 } 27252 x0 := r0.Args[0] 27253 if x0.Op != OpAMD64MOVWloadidx1 { 27254 break 27255 } 27256 i0 := x0.AuxInt 27257 if x0.Aux != s { 27258 break 27259 } 27260 if p != x0.Args[0] { 27261 break 27262 } 27263 if idx != x0.Args[1] { 27264 break 27265 } 27266 if mem != x0.Args[2] { 27267 break 27268 } 27269 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27270 break 27271 } 27272 b = mergePoint(b, x0, x1) 27273 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27274 v.reset(OpCopy) 27275 v.AddArg(v0) 27276 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 27277 v1.AuxInt = i0 27278 v1.Aux = s 27279 v1.AddArg(p) 27280 v1.AddArg(idx) 27281 v1.AddArg(mem) 27282 v0.AddArg(v1) 27283 return true 27284 } 27285 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 27286 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27287 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 27288 for { 27289 r1 := v.Args[0] 27290 if r1.Op != OpAMD64ROLWconst { 27291 break 27292 } 27293 if r1.AuxInt != 8 { 27294 break 27295 } 27296 x1 := r1.Args[0] 27297 if x1.Op != OpAMD64MOVWloadidx1 { 27298 break 27299 } 27300 i1 := x1.AuxInt 27301 s := x1.Aux 27302 idx := x1.Args[0] 27303 p := x1.Args[1] 27304 mem := x1.Args[2] 27305 sh := v.Args[1] 27306 if sh.Op != OpAMD64SHLQconst { 27307 break 27308 } 27309 if sh.AuxInt != 16 { 27310 break 27311 } 27312 r0 := sh.Args[0] 27313 if r0.Op != OpAMD64ROLWconst { 27314 break 27315 } 27316 if r0.AuxInt != 8 { 27317 break 27318 } 27319 x0 := r0.Args[0] 27320 if x0.Op != OpAMD64MOVWloadidx1 { 27321 break 27322 } 27323 i0 := x0.AuxInt 27324 if x0.Aux != s { 27325 break 27326 } 27327 if p != x0.Args[0] { 27328 break 27329 } 27330 if idx != x0.Args[1] { 27331 break 27332 } 27333 if mem != x0.Args[2] { 27334 break 27335 } 27336 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27337 break 27338 } 27339 b = mergePoint(b, x0, x1) 27340 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27341 v.reset(OpCopy) 27342 v.AddArg(v0) 27343 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 27344 v1.AuxInt = i0 27345 v1.Aux = s 27346 v1.AddArg(p) 27347 v1.AddArg(idx) 27348 v1.AddArg(mem) 27349 v0.AddArg(v1) 27350 return true 27351 } 27352 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 27353 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27354 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 27355 for { 
27356 r1 := v.Args[0] 27357 if r1.Op != OpAMD64ROLWconst { 27358 break 27359 } 27360 if r1.AuxInt != 8 { 27361 break 27362 } 27363 x1 := r1.Args[0] 27364 if x1.Op != OpAMD64MOVWloadidx1 { 27365 break 27366 } 27367 i1 := x1.AuxInt 27368 s := x1.Aux 27369 p := x1.Args[0] 27370 idx := x1.Args[1] 27371 mem := x1.Args[2] 27372 sh := v.Args[1] 27373 if sh.Op != OpAMD64SHLQconst { 27374 break 27375 } 27376 if sh.AuxInt != 16 { 27377 break 27378 } 27379 r0 := sh.Args[0] 27380 if r0.Op != OpAMD64ROLWconst { 27381 break 27382 } 27383 if r0.AuxInt != 8 { 27384 break 27385 } 27386 x0 := r0.Args[0] 27387 if x0.Op != OpAMD64MOVWloadidx1 { 27388 break 27389 } 27390 i0 := x0.AuxInt 27391 if x0.Aux != s { 27392 break 27393 } 27394 if idx != x0.Args[0] { 27395 break 27396 } 27397 if p != x0.Args[1] { 27398 break 27399 } 27400 if mem != x0.Args[2] { 27401 break 27402 } 27403 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27404 break 27405 } 27406 b = mergePoint(b, x0, x1) 27407 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27408 v.reset(OpCopy) 27409 v.AddArg(v0) 27410 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 27411 v1.AuxInt = i0 27412 v1.Aux = s 27413 v1.AddArg(p) 27414 v1.AddArg(idx) 27415 v1.AddArg(mem) 27416 v0.AddArg(v1) 27417 return true 27418 } 27419 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 27420 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27421 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 27422 for { 27423 r1 := v.Args[0] 27424 if r1.Op != OpAMD64ROLWconst { 27425 break 27426 } 27427 if r1.AuxInt != 8 { 27428 break 27429 } 27430 x1 := r1.Args[0] 27431 if x1.Op != OpAMD64MOVWloadidx1 { 27432 break 27433 } 27434 i1 := x1.AuxInt 27435 s := x1.Aux 27436 idx := x1.Args[0] 27437 p := x1.Args[1] 27438 mem := x1.Args[2] 27439 sh := v.Args[1] 27440 if sh.Op != OpAMD64SHLQconst { 27441 break 27442 } 27443 if sh.AuxInt != 16 { 27444 break 27445 } 27446 r0 := sh.Args[0] 27447 if r0.Op != OpAMD64ROLWconst { 27448 break 27449 } 27450 if r0.AuxInt != 8 { 27451 break 27452 } 27453 x0 := r0.Args[0] 27454 if x0.Op != OpAMD64MOVWloadidx1 { 27455 break 27456 } 27457 i0 := x0.AuxInt 27458 if x0.Aux != s { 27459 break 27460 } 27461 if idx != x0.Args[0] { 27462 break 27463 } 27464 if p != x0.Args[1] { 27465 break 27466 } 27467 if mem != x0.Args[2] { 27468 break 27469 } 27470 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27471 break 27472 } 27473 b = mergePoint(b, x0, x1) 27474 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27475 v.reset(OpCopy) 27476 v.AddArg(v0) 27477 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 27478 v1.AuxInt = i0 27479 v1.Aux = s 27480 v1.AddArg(p) 27481 v1.AddArg(idx) 27482 v1.AddArg(mem) 27483 v0.AddArg(v1) 27484 return true 27485 } 27486 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 27487 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && 
r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27488 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 27489 for { 27490 sh := v.Args[0] 27491 if sh.Op != OpAMD64SHLQconst { 27492 break 27493 } 27494 if sh.AuxInt != 16 { 27495 break 27496 } 27497 r0 := sh.Args[0] 27498 if r0.Op != OpAMD64ROLWconst { 27499 break 27500 } 27501 if r0.AuxInt != 8 { 27502 break 27503 } 27504 x0 := r0.Args[0] 27505 if x0.Op != OpAMD64MOVWloadidx1 { 27506 break 27507 } 27508 i0 := x0.AuxInt 27509 s := x0.Aux 27510 p := x0.Args[0] 27511 idx := x0.Args[1] 27512 mem := x0.Args[2] 27513 r1 := v.Args[1] 27514 if r1.Op != OpAMD64ROLWconst { 27515 break 27516 } 27517 if r1.AuxInt != 8 { 27518 break 27519 } 27520 x1 := r1.Args[0] 27521 if x1.Op != OpAMD64MOVWloadidx1 { 27522 break 27523 } 27524 i1 := x1.AuxInt 27525 if x1.Aux != s { 27526 break 27527 } 27528 if p != x1.Args[0] { 27529 break 27530 } 27531 if idx != x1.Args[1] { 27532 break 27533 } 27534 if mem != x1.Args[2] { 27535 break 27536 } 27537 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27538 break 27539 } 27540 b = mergePoint(b, x0, x1) 27541 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27542 v.reset(OpCopy) 27543 v.AddArg(v0) 27544 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 27545 v1.AuxInt = i0 27546 v1.Aux = s 27547 v1.AddArg(p) 27548 v1.AddArg(idx) 27549 v1.AddArg(mem) 27550 v0.AddArg(v1) 27551 return true 27552 } 27553 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 27554 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27555 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 27556 for { 27557 sh := v.Args[0] 27558 if sh.Op != OpAMD64SHLQconst { 27559 break 27560 } 27561 if sh.AuxInt != 16 { 27562 break 27563 } 27564 r0 := sh.Args[0] 27565 if r0.Op != OpAMD64ROLWconst { 27566 break 27567 } 27568 if r0.AuxInt != 8 { 27569 break 27570 } 27571 x0 := r0.Args[0] 27572 if x0.Op != OpAMD64MOVWloadidx1 { 27573 break 27574 } 27575 i0 := x0.AuxInt 27576 s := x0.Aux 27577 idx := x0.Args[0] 27578 p := x0.Args[1] 27579 mem := x0.Args[2] 27580 r1 := v.Args[1] 27581 if r1.Op != OpAMD64ROLWconst { 27582 break 27583 } 27584 if r1.AuxInt != 8 { 27585 break 27586 } 27587 x1 := r1.Args[0] 27588 if x1.Op != OpAMD64MOVWloadidx1 { 27589 break 27590 } 27591 i1 := x1.AuxInt 27592 if x1.Aux != s { 27593 break 27594 } 27595 if p != x1.Args[0] { 27596 break 27597 } 27598 if idx != x1.Args[1] { 27599 break 27600 } 27601 if mem != x1.Args[2] { 27602 break 27603 } 27604 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27605 break 27606 } 27607 b = mergePoint(b, x0, x1) 27608 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27609 v.reset(OpCopy) 27610 v.AddArg(v0) 27611 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 27612 v1.AuxInt = i0 27613 v1.Aux = s 27614 v1.AddArg(p) 27615 v1.AddArg(idx) 27616 v1.AddArg(mem) 27617 
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
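		// The new ORQ (v0) combines the merged, byte-swapped load chain
		// with the untouched remainder y of the original OR tree.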
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
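		// Both partial loads must also observe the same memory state;
		// merging loads from different memories would be unsound.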
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
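		// v1 shifts the byte-swapped 16-bit load back to bit position j1,
		// matching where the two byte loads sat in the original OR tree.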
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
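		// s0 must wrap the byte load at the lower offset i0; the rule's cond
		// requires i1 == i0+1 and j1 == j0-8, i.e. big-endian byte order,
		// which is why the result swaps bytes with ROLWconst [8].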
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
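		// v3 is the single merged 32-bit load, anchored at the lower
		// offset i0 with the same symbol and addressing operands.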
29584 v3.Aux = s 29585 v3.AddArg(p) 29586 v3.AddArg(idx) 29587 v3.AddArg(mem) 29588 v2.AddArg(v3) 29589 v1.AddArg(v2) 29590 v0.AddArg(v1) 29591 v0.AddArg(y) 29592 return true 29593 } 29594 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 29595 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29596 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 29597 for { 29598 s0 := v.Args[0] 29599 if s0.Op != OpAMD64SHLQconst { 29600 break 29601 } 29602 j0 := s0.AuxInt 29603 r0 := s0.Args[0] 29604 if r0.Op != OpAMD64ROLWconst { 29605 break 29606 } 29607 if r0.AuxInt != 8 { 29608 break 29609 } 29610 x0 := r0.Args[0] 29611 if x0.Op != OpAMD64MOVWloadidx1 { 29612 break 29613 } 29614 i0 := x0.AuxInt 29615 s := x0.Aux 29616 idx := x0.Args[0] 29617 p := x0.Args[1] 29618 mem := x0.Args[2] 29619 or := v.Args[1] 29620 if or.Op != OpAMD64ORQ { 29621 break 29622 } 29623 s1 := or.Args[0] 29624 if s1.Op != OpAMD64SHLQconst { 29625 break 29626 } 29627 j1 := s1.AuxInt 29628 r1 := s1.Args[0] 29629 if r1.Op != OpAMD64ROLWconst { 29630 break 29631 } 29632 if r1.AuxInt != 8 { 29633 break 29634 } 29635 x1 := r1.Args[0] 29636 if x1.Op != OpAMD64MOVWloadidx1 { 29637 break 29638 } 29639 i1 := x1.AuxInt 29640 if x1.Aux != s { 29641 break 29642 } 29643 if idx != x1.Args[0] { 29644 break 29645 } 29646 if p != x1.Args[1] { 29647 break 29648 } 29649 if mem != x1.Args[2] { 29650 break 29651 } 29652 y := or.Args[1] 29653 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29654 break 29655 } 29656 b = mergePoint(b, x0, x1) 29657 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29658 v.reset(OpCopy) 29659 v.AddArg(v0) 29660 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29661 v1.AuxInt = j1 29662 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) 29663 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 29664 v3.AuxInt = i0 29665 v3.Aux = s 29666 v3.AddArg(p) 29667 v3.AddArg(idx) 29668 v3.AddArg(mem) 29669 v2.AddArg(v3) 29670 v1.AddArg(v2) 29671 v0.AddArg(v1) 29672 v0.AddArg(y) 29673 return true 29674 } 29675 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 29676 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29677 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 29678 for { 29679 s0 := v.Args[0] 29680 if s0.Op != OpAMD64SHLQconst { 29681 break 29682 } 29683 j0 := s0.AuxInt 29684 r0 := s0.Args[0] 29685 if r0.Op != OpAMD64ROLWconst { 29686 break 29687 } 29688 if r0.AuxInt 
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
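// The generator emits the rules for one opcode in chunks of ten per
// function; ORQ has enough load-combining variants to spill into many
// such chunks (..._150, _160), tried in sequence until one fires.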
func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
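	// A load feeding one operand of ORQ can be folded into a memory-operand
	// form: (ORQ x l:(MOVQload [off] {sym} ptr mem)) becomes
	// (ORQmem x [off] {sym} ptr mem), i.e. a single OR-from-memory
	// instruction, provided canMergeLoad confirms the load is only used
	// here and can safely be moved into the OR. The two rules below cover
	// both operand orders of the commutative ORQ.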
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORQmem x [off] {sym} ptr mem)
	for {
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
	// match: (ORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// cond:
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
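// The rotate rules below canonicalize on left rotates. A rotate by a
// negated count is the opposite-direction rotate by that count
// ((ROLB x (NEGQ y)) -> (RORB x y)), and a rotate by a constant becomes
// the const form with the count reduced modulo the operand width
// (c&7 for 8 bits, c&15, c&31, c&63 for wider). For example, rotating a
// byte left by 11 is the same as rotating it left by 3.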
func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
	// match: (ROLB x (NEGQ y))
	// cond:
	// result: (RORB x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// cond:
	// result: (RORB x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [c&7 ] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// cond:
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
	// match: (ROLL x (NEGQ y))
	// cond:
	// result: (RORL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// cond:
	// result: (RORL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// cond:
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
	// match: (ROLQ x (NEGQ y))
	// cond:
	// result: (RORQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// cond:
	// result: (RORQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// cond:
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
	// match: (ROLW x (NEGQ y))
	// cond:
	// result: (RORW x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// cond:
	// result: (RORW x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [c&15] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// cond:
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
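// Right rotates are expressed in terms of left rotates: rotating right by
// a negated count is a left rotate ((RORB x (NEGQ y)) -> (ROLB x y)), and
// a right rotate by a constant c is a left rotate by (-c) masked to the
// operand width, since ror(x, c) == rol(x, (width-c) mod width). For
// example, rotating a byte right by 3 equals rotating it left by 5.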
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// cond:
	// result: (ROLB x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// cond:
	// result: (ROLB x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
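// For 8- and 16-bit arithmetic right shifts the constant count is first
// masked to 5 bits (x86 masks variable shift counts for sub-64-bit
// operands to &31) and then clamped with min to width-1: an arithmetic
// shift by at least width-1 already yields pure sign bits, so
// SARBconst [7] (or SARWconst [15]) is a safe stand-in for any larger
// masked count.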
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
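// The 32- and 64-bit arithmetic shifts also strip redundant arithmetic
// from a variable shift count. Because the machine instruction masks the
// count to 5 (SARL) or 6 (SARQ) bits anyway, adding a multiple of 32/64
// ((ADDQconst [c] y) with c&31 == 0) or masking with an all-ones mask
// ((ANDQconst [c] y) with c&31 == 31) cannot change the result, so the
// wrapper is dropped; the same holds inside a NEGQ/NEGL produced by
// rotate-style shift lowering.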
func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARQ x (MOVQconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
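// In the SAR*const rules a constant shifted by a constant folds away
// entirely. AuxInt is a signed int64, and Go's >> on a signed operand is
// an arithmetic shift, which matches SAR's semantics exactly; for
// example, -8 >> 1 yields -4.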
func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
	// match: (SARQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
	// match: (SARWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
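// SBBLcarrymask/SBBQcarrymask materialize a 0 or -1 mask from the carry
// flag (sbb x, x). When the flags are statically known (the Flag*
// pseudo-ops), the mask folds to a constant: carry set (the unsigned
// less-than cases, FlagLT_ULT/FlagGT_ULT) gives -1, carry clear gives 0.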
= 0 32059 return true 32060 } 32061 return false 32062 } 32063 func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { 32064 // match: (SBBQcarrymask (FlagEQ)) 32065 // cond: 32066 // result: (MOVQconst [0]) 32067 for { 32068 v_0 := v.Args[0] 32069 if v_0.Op != OpAMD64FlagEQ { 32070 break 32071 } 32072 v.reset(OpAMD64MOVQconst) 32073 v.AuxInt = 0 32074 return true 32075 } 32076 // match: (SBBQcarrymask (FlagLT_ULT)) 32077 // cond: 32078 // result: (MOVQconst [-1]) 32079 for { 32080 v_0 := v.Args[0] 32081 if v_0.Op != OpAMD64FlagLT_ULT { 32082 break 32083 } 32084 v.reset(OpAMD64MOVQconst) 32085 v.AuxInt = -1 32086 return true 32087 } 32088 // match: (SBBQcarrymask (FlagLT_UGT)) 32089 // cond: 32090 // result: (MOVQconst [0]) 32091 for { 32092 v_0 := v.Args[0] 32093 if v_0.Op != OpAMD64FlagLT_UGT { 32094 break 32095 } 32096 v.reset(OpAMD64MOVQconst) 32097 v.AuxInt = 0 32098 return true 32099 } 32100 // match: (SBBQcarrymask (FlagGT_ULT)) 32101 // cond: 32102 // result: (MOVQconst [-1]) 32103 for { 32104 v_0 := v.Args[0] 32105 if v_0.Op != OpAMD64FlagGT_ULT { 32106 break 32107 } 32108 v.reset(OpAMD64MOVQconst) 32109 v.AuxInt = -1 32110 return true 32111 } 32112 // match: (SBBQcarrymask (FlagGT_UGT)) 32113 // cond: 32114 // result: (MOVQconst [0]) 32115 for { 32116 v_0 := v.Args[0] 32117 if v_0.Op != OpAMD64FlagGT_UGT { 32118 break 32119 } 32120 v.reset(OpAMD64MOVQconst) 32121 v.AuxInt = 0 32122 return true 32123 } 32124 return false 32125 } 32126 func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { 32127 // match: (SETA (InvertFlags x)) 32128 // cond: 32129 // result: (SETB x) 32130 for { 32131 v_0 := v.Args[0] 32132 if v_0.Op != OpAMD64InvertFlags { 32133 break 32134 } 32135 x := v_0.Args[0] 32136 v.reset(OpAMD64SETB) 32137 v.AddArg(x) 32138 return true 32139 } 32140 // match: (SETA (FlagEQ)) 32141 // cond: 32142 // result: (MOVLconst [0]) 32143 for { 32144 v_0 := v.Args[0] 32145 if v_0.Op != OpAMD64FlagEQ { 32146 break 32147 } 32148 v.reset(OpAMD64MOVLconst) 32149 v.AuxInt = 0 32150 return true 32151 } 32152 // match: (SETA (FlagLT_ULT)) 32153 // cond: 32154 // result: (MOVLconst [0]) 32155 for { 32156 v_0 := v.Args[0] 32157 if v_0.Op != OpAMD64FlagLT_ULT { 32158 break 32159 } 32160 v.reset(OpAMD64MOVLconst) 32161 v.AuxInt = 0 32162 return true 32163 } 32164 // match: (SETA (FlagLT_UGT)) 32165 // cond: 32166 // result: (MOVLconst [1]) 32167 for { 32168 v_0 := v.Args[0] 32169 if v_0.Op != OpAMD64FlagLT_UGT { 32170 break 32171 } 32172 v.reset(OpAMD64MOVLconst) 32173 v.AuxInt = 1 32174 return true 32175 } 32176 // match: (SETA (FlagGT_ULT)) 32177 // cond: 32178 // result: (MOVLconst [0]) 32179 for { 32180 v_0 := v.Args[0] 32181 if v_0.Op != OpAMD64FlagGT_ULT { 32182 break 32183 } 32184 v.reset(OpAMD64MOVLconst) 32185 v.AuxInt = 0 32186 return true 32187 } 32188 // match: (SETA (FlagGT_UGT)) 32189 // cond: 32190 // result: (MOVLconst [1]) 32191 for { 32192 v_0 := v.Args[0] 32193 if v_0.Op != OpAMD64FlagGT_UGT { 32194 break 32195 } 32196 v.reset(OpAMD64MOVLconst) 32197 v.AuxInt = 1 32198 return true 32199 } 32200 return false 32201 } 32202 func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { 32203 // match: (SETAE (InvertFlags x)) 32204 // cond: 32205 // result: (SETBE x) 32206 for { 32207 v_0 := v.Args[0] 32208 if v_0.Op != OpAMD64InvertFlags { 32209 break 32210 } 32211 x := v_0.Args[0] 32212 v.reset(OpAMD64SETBE) 32213 v.AddArg(x) 32214 return true 32215 } 32216 // match: (SETAE (FlagEQ)) 32217 // cond: 32218 // result: (MOVLconst [1]) 32219 for { 32220 v_0 := v.Args[0] 32221 if 
v_0.Op != OpAMD64FlagEQ { 32222 break 32223 } 32224 v.reset(OpAMD64MOVLconst) 32225 v.AuxInt = 1 32226 return true 32227 } 32228 // match: (SETAE (FlagLT_ULT)) 32229 // cond: 32230 // result: (MOVLconst [0]) 32231 for { 32232 v_0 := v.Args[0] 32233 if v_0.Op != OpAMD64FlagLT_ULT { 32234 break 32235 } 32236 v.reset(OpAMD64MOVLconst) 32237 v.AuxInt = 0 32238 return true 32239 } 32240 // match: (SETAE (FlagLT_UGT)) 32241 // cond: 32242 // result: (MOVLconst [1]) 32243 for { 32244 v_0 := v.Args[0] 32245 if v_0.Op != OpAMD64FlagLT_UGT { 32246 break 32247 } 32248 v.reset(OpAMD64MOVLconst) 32249 v.AuxInt = 1 32250 return true 32251 } 32252 // match: (SETAE (FlagGT_ULT)) 32253 // cond: 32254 // result: (MOVLconst [0]) 32255 for { 32256 v_0 := v.Args[0] 32257 if v_0.Op != OpAMD64FlagGT_ULT { 32258 break 32259 } 32260 v.reset(OpAMD64MOVLconst) 32261 v.AuxInt = 0 32262 return true 32263 } 32264 // match: (SETAE (FlagGT_UGT)) 32265 // cond: 32266 // result: (MOVLconst [1]) 32267 for { 32268 v_0 := v.Args[0] 32269 if v_0.Op != OpAMD64FlagGT_UGT { 32270 break 32271 } 32272 v.reset(OpAMD64MOVLconst) 32273 v.AuxInt = 1 32274 return true 32275 } 32276 return false 32277 } 32278 func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { 32279 // match: (SETB (InvertFlags x)) 32280 // cond: 32281 // result: (SETA x) 32282 for { 32283 v_0 := v.Args[0] 32284 if v_0.Op != OpAMD64InvertFlags { 32285 break 32286 } 32287 x := v_0.Args[0] 32288 v.reset(OpAMD64SETA) 32289 v.AddArg(x) 32290 return true 32291 } 32292 // match: (SETB (FlagEQ)) 32293 // cond: 32294 // result: (MOVLconst [0]) 32295 for { 32296 v_0 := v.Args[0] 32297 if v_0.Op != OpAMD64FlagEQ { 32298 break 32299 } 32300 v.reset(OpAMD64MOVLconst) 32301 v.AuxInt = 0 32302 return true 32303 } 32304 // match: (SETB (FlagLT_ULT)) 32305 // cond: 32306 // result: (MOVLconst [1]) 32307 for { 32308 v_0 := v.Args[0] 32309 if v_0.Op != OpAMD64FlagLT_ULT { 32310 break 32311 } 32312 v.reset(OpAMD64MOVLconst) 32313 v.AuxInt = 1 32314 return true 32315 } 32316 // match: (SETB (FlagLT_UGT)) 32317 // cond: 32318 // result: (MOVLconst [0]) 32319 for { 32320 v_0 := v.Args[0] 32321 if v_0.Op != OpAMD64FlagLT_UGT { 32322 break 32323 } 32324 v.reset(OpAMD64MOVLconst) 32325 v.AuxInt = 0 32326 return true 32327 } 32328 // match: (SETB (FlagGT_ULT)) 32329 // cond: 32330 // result: (MOVLconst [1]) 32331 for { 32332 v_0 := v.Args[0] 32333 if v_0.Op != OpAMD64FlagGT_ULT { 32334 break 32335 } 32336 v.reset(OpAMD64MOVLconst) 32337 v.AuxInt = 1 32338 return true 32339 } 32340 // match: (SETB (FlagGT_UGT)) 32341 // cond: 32342 // result: (MOVLconst [0]) 32343 for { 32344 v_0 := v.Args[0] 32345 if v_0.Op != OpAMD64FlagGT_UGT { 32346 break 32347 } 32348 v.reset(OpAMD64MOVLconst) 32349 v.AuxInt = 0 32350 return true 32351 } 32352 return false 32353 } 32354 func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { 32355 // match: (SETBE (InvertFlags x)) 32356 // cond: 32357 // result: (SETAE x) 32358 for { 32359 v_0 := v.Args[0] 32360 if v_0.Op != OpAMD64InvertFlags { 32361 break 32362 } 32363 x := v_0.Args[0] 32364 v.reset(OpAMD64SETAE) 32365 v.AddArg(x) 32366 return true 32367 } 32368 // match: (SETBE (FlagEQ)) 32369 // cond: 32370 // result: (MOVLconst [1]) 32371 for { 32372 v_0 := v.Args[0] 32373 if v_0.Op != OpAMD64FlagEQ { 32374 break 32375 } 32376 v.reset(OpAMD64MOVLconst) 32377 v.AuxInt = 1 32378 return true 32379 } 32380 // match: (SETBE (FlagLT_ULT)) 32381 // cond: 32382 // result: (MOVLconst [1]) 32383 for { 32384 v_0 := v.Args[0] 32385 if v_0.Op != OpAMD64FlagLT_ULT { 32386 break 32387 
} 32388 v.reset(OpAMD64MOVLconst) 32389 v.AuxInt = 1 32390 return true 32391 } 32392 // match: (SETBE (FlagLT_UGT)) 32393 // cond: 32394 // result: (MOVLconst [0]) 32395 for { 32396 v_0 := v.Args[0] 32397 if v_0.Op != OpAMD64FlagLT_UGT { 32398 break 32399 } 32400 v.reset(OpAMD64MOVLconst) 32401 v.AuxInt = 0 32402 return true 32403 } 32404 // match: (SETBE (FlagGT_ULT)) 32405 // cond: 32406 // result: (MOVLconst [1]) 32407 for { 32408 v_0 := v.Args[0] 32409 if v_0.Op != OpAMD64FlagGT_ULT { 32410 break 32411 } 32412 v.reset(OpAMD64MOVLconst) 32413 v.AuxInt = 1 32414 return true 32415 } 32416 // match: (SETBE (FlagGT_UGT)) 32417 // cond: 32418 // result: (MOVLconst [0]) 32419 for { 32420 v_0 := v.Args[0] 32421 if v_0.Op != OpAMD64FlagGT_UGT { 32422 break 32423 } 32424 v.reset(OpAMD64MOVLconst) 32425 v.AuxInt = 0 32426 return true 32427 } 32428 return false 32429 } 32430 func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { 32431 b := v.Block 32432 _ = b 32433 config := b.Func.Config 32434 _ = config 32435 // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) 32436 // cond: !config.nacl 32437 // result: (SETAE (BTL x y)) 32438 for { 32439 v_0 := v.Args[0] 32440 if v_0.Op != OpAMD64TESTL { 32441 break 32442 } 32443 v_0_0 := v_0.Args[0] 32444 if v_0_0.Op != OpAMD64SHLL { 32445 break 32446 } 32447 v_0_0_0 := v_0_0.Args[0] 32448 if v_0_0_0.Op != OpAMD64MOVLconst { 32449 break 32450 } 32451 if v_0_0_0.AuxInt != 1 { 32452 break 32453 } 32454 x := v_0_0.Args[1] 32455 y := v_0.Args[1] 32456 if !(!config.nacl) { 32457 break 32458 } 32459 v.reset(OpAMD64SETAE) 32460 v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) 32461 v0.AddArg(x) 32462 v0.AddArg(y) 32463 v.AddArg(v0) 32464 return true 32465 } 32466 // match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x))) 32467 // cond: !config.nacl 32468 // result: (SETAE (BTL x y)) 32469 for { 32470 v_0 := v.Args[0] 32471 if v_0.Op != OpAMD64TESTL { 32472 break 32473 } 32474 y := v_0.Args[0] 32475 v_0_1 := v_0.Args[1] 32476 if v_0_1.Op != OpAMD64SHLL { 32477 break 32478 } 32479 v_0_1_0 := v_0_1.Args[0] 32480 if v_0_1_0.Op != OpAMD64MOVLconst { 32481 break 32482 } 32483 if v_0_1_0.AuxInt != 1 { 32484 break 32485 } 32486 x := v_0_1.Args[1] 32487 if !(!config.nacl) { 32488 break 32489 } 32490 v.reset(OpAMD64SETAE) 32491 v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) 32492 v0.AddArg(x) 32493 v0.AddArg(y) 32494 v.AddArg(v0) 32495 return true 32496 } 32497 // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 32498 // cond: !config.nacl 32499 // result: (SETAE (BTQ x y)) 32500 for { 32501 v_0 := v.Args[0] 32502 if v_0.Op != OpAMD64TESTQ { 32503 break 32504 } 32505 v_0_0 := v_0.Args[0] 32506 if v_0_0.Op != OpAMD64SHLQ { 32507 break 32508 } 32509 v_0_0_0 := v_0_0.Args[0] 32510 if v_0_0_0.Op != OpAMD64MOVQconst { 32511 break 32512 } 32513 if v_0_0_0.AuxInt != 1 { 32514 break 32515 } 32516 x := v_0_0.Args[1] 32517 y := v_0.Args[1] 32518 if !(!config.nacl) { 32519 break 32520 } 32521 v.reset(OpAMD64SETAE) 32522 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) 32523 v0.AddArg(x) 32524 v0.AddArg(y) 32525 v.AddArg(v0) 32526 return true 32527 } 32528 // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 32529 // cond: !config.nacl 32530 // result: (SETAE (BTQ x y)) 32531 for { 32532 v_0 := v.Args[0] 32533 if v_0.Op != OpAMD64TESTQ { 32534 break 32535 } 32536 y := v_0.Args[0] 32537 v_0_1 := v_0.Args[1] 32538 if v_0_1.Op != OpAMD64SHLQ { 32539 break 32540 } 32541 v_0_1_0 := v_0_1.Args[0] 32542 if v_0_1_0.Op != OpAMD64MOVQconst { 32543 break 32544 } 32545 if v_0_1_0.AuxInt != 1 { 32546 
break 32547 } 32548 x := v_0_1.Args[1] 32549 if !(!config.nacl) { 32550 break 32551 } 32552 v.reset(OpAMD64SETAE) 32553 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) 32554 v0.AddArg(x) 32555 v0.AddArg(y) 32556 v.AddArg(v0) 32557 return true 32558 } 32559 // match: (SETEQ (TESTLconst [c] x)) 32560 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 32561 // result: (SETAE (BTLconst [log2(c)] x)) 32562 for { 32563 v_0 := v.Args[0] 32564 if v_0.Op != OpAMD64TESTLconst { 32565 break 32566 } 32567 c := v_0.AuxInt 32568 x := v_0.Args[0] 32569 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 32570 break 32571 } 32572 v.reset(OpAMD64SETAE) 32573 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags) 32574 v0.AuxInt = log2(c) 32575 v0.AddArg(x) 32576 v.AddArg(v0) 32577 return true 32578 } 32579 // match: (SETEQ (TESTQconst [c] x)) 32580 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 32581 // result: (SETAE (BTQconst [log2(c)] x)) 32582 for { 32583 v_0 := v.Args[0] 32584 if v_0.Op != OpAMD64TESTQconst { 32585 break 32586 } 32587 c := v_0.AuxInt 32588 x := v_0.Args[0] 32589 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 32590 break 32591 } 32592 v.reset(OpAMD64SETAE) 32593 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 32594 v0.AuxInt = log2(c) 32595 v0.AddArg(x) 32596 v.AddArg(v0) 32597 return true 32598 } 32599 // match: (SETEQ (TESTQ (MOVQconst [c]) x)) 32600 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 32601 // result: (SETAE (BTQconst [log2(c)] x)) 32602 for { 32603 v_0 := v.Args[0] 32604 if v_0.Op != OpAMD64TESTQ { 32605 break 32606 } 32607 v_0_0 := v_0.Args[0] 32608 if v_0_0.Op != OpAMD64MOVQconst { 32609 break 32610 } 32611 c := v_0_0.AuxInt 32612 x := v_0.Args[1] 32613 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 32614 break 32615 } 32616 v.reset(OpAMD64SETAE) 32617 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 32618 v0.AuxInt = log2(c) 32619 v0.AddArg(x) 32620 v.AddArg(v0) 32621 return true 32622 } 32623 // match: (SETEQ (TESTQ x (MOVQconst [c]))) 32624 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 32625 // result: (SETAE (BTQconst [log2(c)] x)) 32626 for { 32627 v_0 := v.Args[0] 32628 if v_0.Op != OpAMD64TESTQ { 32629 break 32630 } 32631 x := v_0.Args[0] 32632 v_0_1 := v_0.Args[1] 32633 if v_0_1.Op != OpAMD64MOVQconst { 32634 break 32635 } 32636 c := v_0_1.AuxInt 32637 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 32638 break 32639 } 32640 v.reset(OpAMD64SETAE) 32641 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 32642 v0.AuxInt = log2(c) 32643 v0.AddArg(x) 32644 v.AddArg(v0) 32645 return true 32646 } 32647 // match: (SETEQ (InvertFlags x)) 32648 // cond: 32649 // result: (SETEQ x) 32650 for { 32651 v_0 := v.Args[0] 32652 if v_0.Op != OpAMD64InvertFlags { 32653 break 32654 } 32655 x := v_0.Args[0] 32656 v.reset(OpAMD64SETEQ) 32657 v.AddArg(x) 32658 return true 32659 } 32660 // match: (SETEQ (FlagEQ)) 32661 // cond: 32662 // result: (MOVLconst [1]) 32663 for { 32664 v_0 := v.Args[0] 32665 if v_0.Op != OpAMD64FlagEQ { 32666 break 32667 } 32668 v.reset(OpAMD64MOVLconst) 32669 v.AuxInt = 1 32670 return true 32671 } 32672 return false 32673 } 32674 func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { 32675 // match: (SETEQ (FlagLT_ULT)) 32676 // cond: 32677 // result: (MOVLconst [0]) 32678 for { 32679 v_0 := v.Args[0] 32680 if v_0.Op != OpAMD64FlagLT_ULT { 32681 break 32682 } 32683 v.reset(OpAMD64MOVLconst) 32684 v.AuxInt = 0 32685 return true 32686 } 32687 // match: (SETEQ (FlagLT_UGT)) 32688 // 
cond: 32689 // result: (MOVLconst [0]) 32690 for { 32691 v_0 := v.Args[0] 32692 if v_0.Op != OpAMD64FlagLT_UGT { 32693 break 32694 } 32695 v.reset(OpAMD64MOVLconst) 32696 v.AuxInt = 0 32697 return true 32698 } 32699 // match: (SETEQ (FlagGT_ULT)) 32700 // cond: 32701 // result: (MOVLconst [0]) 32702 for { 32703 v_0 := v.Args[0] 32704 if v_0.Op != OpAMD64FlagGT_ULT { 32705 break 32706 } 32707 v.reset(OpAMD64MOVLconst) 32708 v.AuxInt = 0 32709 return true 32710 } 32711 // match: (SETEQ (FlagGT_UGT)) 32712 // cond: 32713 // result: (MOVLconst [0]) 32714 for { 32715 v_0 := v.Args[0] 32716 if v_0.Op != OpAMD64FlagGT_UGT { 32717 break 32718 } 32719 v.reset(OpAMD64MOVLconst) 32720 v.AuxInt = 0 32721 return true 32722 } 32723 return false 32724 } 32725 func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { 32726 // match: (SETG (InvertFlags x)) 32727 // cond: 32728 // result: (SETL x) 32729 for { 32730 v_0 := v.Args[0] 32731 if v_0.Op != OpAMD64InvertFlags { 32732 break 32733 } 32734 x := v_0.Args[0] 32735 v.reset(OpAMD64SETL) 32736 v.AddArg(x) 32737 return true 32738 } 32739 // match: (SETG (FlagEQ)) 32740 // cond: 32741 // result: (MOVLconst [0]) 32742 for { 32743 v_0 := v.Args[0] 32744 if v_0.Op != OpAMD64FlagEQ { 32745 break 32746 } 32747 v.reset(OpAMD64MOVLconst) 32748 v.AuxInt = 0 32749 return true 32750 } 32751 // match: (SETG (FlagLT_ULT)) 32752 // cond: 32753 // result: (MOVLconst [0]) 32754 for { 32755 v_0 := v.Args[0] 32756 if v_0.Op != OpAMD64FlagLT_ULT { 32757 break 32758 } 32759 v.reset(OpAMD64MOVLconst) 32760 v.AuxInt = 0 32761 return true 32762 } 32763 // match: (SETG (FlagLT_UGT)) 32764 // cond: 32765 // result: (MOVLconst [0]) 32766 for { 32767 v_0 := v.Args[0] 32768 if v_0.Op != OpAMD64FlagLT_UGT { 32769 break 32770 } 32771 v.reset(OpAMD64MOVLconst) 32772 v.AuxInt = 0 32773 return true 32774 } 32775 // match: (SETG (FlagGT_ULT)) 32776 // cond: 32777 // result: (MOVLconst [1]) 32778 for { 32779 v_0 := v.Args[0] 32780 if v_0.Op != OpAMD64FlagGT_ULT { 32781 break 32782 } 32783 v.reset(OpAMD64MOVLconst) 32784 v.AuxInt = 1 32785 return true 32786 } 32787 // match: (SETG (FlagGT_UGT)) 32788 // cond: 32789 // result: (MOVLconst [1]) 32790 for { 32791 v_0 := v.Args[0] 32792 if v_0.Op != OpAMD64FlagGT_UGT { 32793 break 32794 } 32795 v.reset(OpAMD64MOVLconst) 32796 v.AuxInt = 1 32797 return true 32798 } 32799 return false 32800 } 32801 func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { 32802 // match: (SETGE (InvertFlags x)) 32803 // cond: 32804 // result: (SETLE x) 32805 for { 32806 v_0 := v.Args[0] 32807 if v_0.Op != OpAMD64InvertFlags { 32808 break 32809 } 32810 x := v_0.Args[0] 32811 v.reset(OpAMD64SETLE) 32812 v.AddArg(x) 32813 return true 32814 } 32815 // match: (SETGE (FlagEQ)) 32816 // cond: 32817 // result: (MOVLconst [1]) 32818 for { 32819 v_0 := v.Args[0] 32820 if v_0.Op != OpAMD64FlagEQ { 32821 break 32822 } 32823 v.reset(OpAMD64MOVLconst) 32824 v.AuxInt = 1 32825 return true 32826 } 32827 // match: (SETGE (FlagLT_ULT)) 32828 // cond: 32829 // result: (MOVLconst [0]) 32830 for { 32831 v_0 := v.Args[0] 32832 if v_0.Op != OpAMD64FlagLT_ULT { 32833 break 32834 } 32835 v.reset(OpAMD64MOVLconst) 32836 v.AuxInt = 0 32837 return true 32838 } 32839 // match: (SETGE (FlagLT_UGT)) 32840 // cond: 32841 // result: (MOVLconst [0]) 32842 for { 32843 v_0 := v.Args[0] 32844 if v_0.Op != OpAMD64FlagLT_UGT { 32845 break 32846 } 32847 v.reset(OpAMD64MOVLconst) 32848 v.AuxInt = 0 32849 return true 32850 } 32851 // match: (SETGE (FlagGT_ULT)) 32852 // cond: 32853 // result: (MOVLconst [1]) 
32854 for { 32855 v_0 := v.Args[0] 32856 if v_0.Op != OpAMD64FlagGT_ULT { 32857 break 32858 } 32859 v.reset(OpAMD64MOVLconst) 32860 v.AuxInt = 1 32861 return true 32862 } 32863 // match: (SETGE (FlagGT_UGT)) 32864 // cond: 32865 // result: (MOVLconst [1]) 32866 for { 32867 v_0 := v.Args[0] 32868 if v_0.Op != OpAMD64FlagGT_UGT { 32869 break 32870 } 32871 v.reset(OpAMD64MOVLconst) 32872 v.AuxInt = 1 32873 return true 32874 } 32875 return false 32876 } 32877 func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { 32878 // match: (SETL (InvertFlags x)) 32879 // cond: 32880 // result: (SETG x) 32881 for { 32882 v_0 := v.Args[0] 32883 if v_0.Op != OpAMD64InvertFlags { 32884 break 32885 } 32886 x := v_0.Args[0] 32887 v.reset(OpAMD64SETG) 32888 v.AddArg(x) 32889 return true 32890 } 32891 // match: (SETL (FlagEQ)) 32892 // cond: 32893 // result: (MOVLconst [0]) 32894 for { 32895 v_0 := v.Args[0] 32896 if v_0.Op != OpAMD64FlagEQ { 32897 break 32898 } 32899 v.reset(OpAMD64MOVLconst) 32900 v.AuxInt = 0 32901 return true 32902 } 32903 // match: (SETL (FlagLT_ULT)) 32904 // cond: 32905 // result: (MOVLconst [1]) 32906 for { 32907 v_0 := v.Args[0] 32908 if v_0.Op != OpAMD64FlagLT_ULT { 32909 break 32910 } 32911 v.reset(OpAMD64MOVLconst) 32912 v.AuxInt = 1 32913 return true 32914 } 32915 // match: (SETL (FlagLT_UGT)) 32916 // cond: 32917 // result: (MOVLconst [1]) 32918 for { 32919 v_0 := v.Args[0] 32920 if v_0.Op != OpAMD64FlagLT_UGT { 32921 break 32922 } 32923 v.reset(OpAMD64MOVLconst) 32924 v.AuxInt = 1 32925 return true 32926 } 32927 // match: (SETL (FlagGT_ULT)) 32928 // cond: 32929 // result: (MOVLconst [0]) 32930 for { 32931 v_0 := v.Args[0] 32932 if v_0.Op != OpAMD64FlagGT_ULT { 32933 break 32934 } 32935 v.reset(OpAMD64MOVLconst) 32936 v.AuxInt = 0 32937 return true 32938 } 32939 // match: (SETL (FlagGT_UGT)) 32940 // cond: 32941 // result: (MOVLconst [0]) 32942 for { 32943 v_0 := v.Args[0] 32944 if v_0.Op != OpAMD64FlagGT_UGT { 32945 break 32946 } 32947 v.reset(OpAMD64MOVLconst) 32948 v.AuxInt = 0 32949 return true 32950 } 32951 return false 32952 } 32953 func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { 32954 // match: (SETLE (InvertFlags x)) 32955 // cond: 32956 // result: (SETGE x) 32957 for { 32958 v_0 := v.Args[0] 32959 if v_0.Op != OpAMD64InvertFlags { 32960 break 32961 } 32962 x := v_0.Args[0] 32963 v.reset(OpAMD64SETGE) 32964 v.AddArg(x) 32965 return true 32966 } 32967 // match: (SETLE (FlagEQ)) 32968 // cond: 32969 // result: (MOVLconst [1]) 32970 for { 32971 v_0 := v.Args[0] 32972 if v_0.Op != OpAMD64FlagEQ { 32973 break 32974 } 32975 v.reset(OpAMD64MOVLconst) 32976 v.AuxInt = 1 32977 return true 32978 } 32979 // match: (SETLE (FlagLT_ULT)) 32980 // cond: 32981 // result: (MOVLconst [1]) 32982 for { 32983 v_0 := v.Args[0] 32984 if v_0.Op != OpAMD64FlagLT_ULT { 32985 break 32986 } 32987 v.reset(OpAMD64MOVLconst) 32988 v.AuxInt = 1 32989 return true 32990 } 32991 // match: (SETLE (FlagLT_UGT)) 32992 // cond: 32993 // result: (MOVLconst [1]) 32994 for { 32995 v_0 := v.Args[0] 32996 if v_0.Op != OpAMD64FlagLT_UGT { 32997 break 32998 } 32999 v.reset(OpAMD64MOVLconst) 33000 v.AuxInt = 1 33001 return true 33002 } 33003 // match: (SETLE (FlagGT_ULT)) 33004 // cond: 33005 // result: (MOVLconst [0]) 33006 for { 33007 v_0 := v.Args[0] 33008 if v_0.Op != OpAMD64FlagGT_ULT { 33009 break 33010 } 33011 v.reset(OpAMD64MOVLconst) 33012 v.AuxInt = 0 33013 return true 33014 } 33015 // match: (SETLE (FlagGT_UGT)) 33016 // cond: 33017 // result: (MOVLconst [0]) 33018 for { 33019 v_0 := v.Args[0] 33020 if 
v_0.Op != OpAMD64FlagGT_UGT { 33021 break 33022 } 33023 v.reset(OpAMD64MOVLconst) 33024 v.AuxInt = 0 33025 return true 33026 } 33027 return false 33028 } 33029 func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 33030 b := v.Block 33031 _ = b 33032 config := b.Func.Config 33033 _ = config 33034 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 33035 // cond: !config.nacl 33036 // result: (SETB (BTL x y)) 33037 for { 33038 v_0 := v.Args[0] 33039 if v_0.Op != OpAMD64TESTL { 33040 break 33041 } 33042 v_0_0 := v_0.Args[0] 33043 if v_0_0.Op != OpAMD64SHLL { 33044 break 33045 } 33046 v_0_0_0 := v_0_0.Args[0] 33047 if v_0_0_0.Op != OpAMD64MOVLconst { 33048 break 33049 } 33050 if v_0_0_0.AuxInt != 1 { 33051 break 33052 } 33053 x := v_0_0.Args[1] 33054 y := v_0.Args[1] 33055 if !(!config.nacl) { 33056 break 33057 } 33058 v.reset(OpAMD64SETB) 33059 v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) 33060 v0.AddArg(x) 33061 v0.AddArg(y) 33062 v.AddArg(v0) 33063 return true 33064 } 33065 // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) 33066 // cond: !config.nacl 33067 // result: (SETB (BTL x y)) 33068 for { 33069 v_0 := v.Args[0] 33070 if v_0.Op != OpAMD64TESTL { 33071 break 33072 } 33073 y := v_0.Args[0] 33074 v_0_1 := v_0.Args[1] 33075 if v_0_1.Op != OpAMD64SHLL { 33076 break 33077 } 33078 v_0_1_0 := v_0_1.Args[0] 33079 if v_0_1_0.Op != OpAMD64MOVLconst { 33080 break 33081 } 33082 if v_0_1_0.AuxInt != 1 { 33083 break 33084 } 33085 x := v_0_1.Args[1] 33086 if !(!config.nacl) { 33087 break 33088 } 33089 v.reset(OpAMD64SETB) 33090 v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) 33091 v0.AddArg(x) 33092 v0.AddArg(y) 33093 v.AddArg(v0) 33094 return true 33095 } 33096 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 33097 // cond: !config.nacl 33098 // result: (SETB (BTQ x y)) 33099 for { 33100 v_0 := v.Args[0] 33101 if v_0.Op != OpAMD64TESTQ { 33102 break 33103 } 33104 v_0_0 := v_0.Args[0] 33105 if v_0_0.Op != OpAMD64SHLQ { 33106 break 33107 } 33108 v_0_0_0 := v_0_0.Args[0] 33109 if v_0_0_0.Op != OpAMD64MOVQconst { 33110 break 33111 } 33112 if v_0_0_0.AuxInt != 1 { 33113 break 33114 } 33115 x := v_0_0.Args[1] 33116 y := v_0.Args[1] 33117 if !(!config.nacl) { 33118 break 33119 } 33120 v.reset(OpAMD64SETB) 33121 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) 33122 v0.AddArg(x) 33123 v0.AddArg(y) 33124 v.AddArg(v0) 33125 return true 33126 } 33127 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) 33128 // cond: !config.nacl 33129 // result: (SETB (BTQ x y)) 33130 for { 33131 v_0 := v.Args[0] 33132 if v_0.Op != OpAMD64TESTQ { 33133 break 33134 } 33135 y := v_0.Args[0] 33136 v_0_1 := v_0.Args[1] 33137 if v_0_1.Op != OpAMD64SHLQ { 33138 break 33139 } 33140 v_0_1_0 := v_0_1.Args[0] 33141 if v_0_1_0.Op != OpAMD64MOVQconst { 33142 break 33143 } 33144 if v_0_1_0.AuxInt != 1 { 33145 break 33146 } 33147 x := v_0_1.Args[1] 33148 if !(!config.nacl) { 33149 break 33150 } 33151 v.reset(OpAMD64SETB) 33152 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) 33153 v0.AddArg(x) 33154 v0.AddArg(y) 33155 v.AddArg(v0) 33156 return true 33157 } 33158 // match: (SETNE (TESTLconst [c] x)) 33159 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 33160 // result: (SETB (BTLconst [log2(c)] x)) 33161 for { 33162 v_0 := v.Args[0] 33163 if v_0.Op != OpAMD64TESTLconst { 33164 break 33165 } 33166 c := v_0.AuxInt 33167 x := v_0.Args[0] 33168 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 33169 break 33170 } 33171 v.reset(OpAMD64SETB) 33172 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags) 33173 v0.AuxInt = 
log2(c) 33174 v0.AddArg(x) 33175 v.AddArg(v0) 33176 return true 33177 } 33178 // match: (SETNE (TESTQconst [c] x)) 33179 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 33180 // result: (SETB (BTQconst [log2(c)] x)) 33181 for { 33182 v_0 := v.Args[0] 33183 if v_0.Op != OpAMD64TESTQconst { 33184 break 33185 } 33186 c := v_0.AuxInt 33187 x := v_0.Args[0] 33188 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 33189 break 33190 } 33191 v.reset(OpAMD64SETB) 33192 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 33193 v0.AuxInt = log2(c) 33194 v0.AddArg(x) 33195 v.AddArg(v0) 33196 return true 33197 } 33198 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 33199 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 33200 // result: (SETB (BTQconst [log2(c)] x)) 33201 for { 33202 v_0 := v.Args[0] 33203 if v_0.Op != OpAMD64TESTQ { 33204 break 33205 } 33206 v_0_0 := v_0.Args[0] 33207 if v_0_0.Op != OpAMD64MOVQconst { 33208 break 33209 } 33210 c := v_0_0.AuxInt 33211 x := v_0.Args[1] 33212 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 33213 break 33214 } 33215 v.reset(OpAMD64SETB) 33216 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 33217 v0.AuxInt = log2(c) 33218 v0.AddArg(x) 33219 v.AddArg(v0) 33220 return true 33221 } 33222 // match: (SETNE (TESTQ x (MOVQconst [c]))) 33223 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 33224 // result: (SETB (BTQconst [log2(c)] x)) 33225 for { 33226 v_0 := v.Args[0] 33227 if v_0.Op != OpAMD64TESTQ { 33228 break 33229 } 33230 x := v_0.Args[0] 33231 v_0_1 := v_0.Args[1] 33232 if v_0_1.Op != OpAMD64MOVQconst { 33233 break 33234 } 33235 c := v_0_1.AuxInt 33236 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 33237 break 33238 } 33239 v.reset(OpAMD64SETB) 33240 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 33241 v0.AuxInt = log2(c) 33242 v0.AddArg(x) 33243 v.AddArg(v0) 33244 return true 33245 } 33246 // match: (SETNE (InvertFlags x)) 33247 // cond: 33248 // result: (SETNE x) 33249 for { 33250 v_0 := v.Args[0] 33251 if v_0.Op != OpAMD64InvertFlags { 33252 break 33253 } 33254 x := v_0.Args[0] 33255 v.reset(OpAMD64SETNE) 33256 v.AddArg(x) 33257 return true 33258 } 33259 // match: (SETNE (FlagEQ)) 33260 // cond: 33261 // result: (MOVLconst [0]) 33262 for { 33263 v_0 := v.Args[0] 33264 if v_0.Op != OpAMD64FlagEQ { 33265 break 33266 } 33267 v.reset(OpAMD64MOVLconst) 33268 v.AuxInt = 0 33269 return true 33270 } 33271 return false 33272 } 33273 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { 33274 // match: (SETNE (FlagLT_ULT)) 33275 // cond: 33276 // result: (MOVLconst [1]) 33277 for { 33278 v_0 := v.Args[0] 33279 if v_0.Op != OpAMD64FlagLT_ULT { 33280 break 33281 } 33282 v.reset(OpAMD64MOVLconst) 33283 v.AuxInt = 1 33284 return true 33285 } 33286 // match: (SETNE (FlagLT_UGT)) 33287 // cond: 33288 // result: (MOVLconst [1]) 33289 for { 33290 v_0 := v.Args[0] 33291 if v_0.Op != OpAMD64FlagLT_UGT { 33292 break 33293 } 33294 v.reset(OpAMD64MOVLconst) 33295 v.AuxInt = 1 33296 return true 33297 } 33298 // match: (SETNE (FlagGT_ULT)) 33299 // cond: 33300 // result: (MOVLconst [1]) 33301 for { 33302 v_0 := v.Args[0] 33303 if v_0.Op != OpAMD64FlagGT_ULT { 33304 break 33305 } 33306 v.reset(OpAMD64MOVLconst) 33307 v.AuxInt = 1 33308 return true 33309 } 33310 // match: (SETNE (FlagGT_UGT)) 33311 // cond: 33312 // result: (MOVLconst [1]) 33313 for { 33314 v_0 := v.Args[0] 33315 if v_0.Op != OpAMD64FlagGT_UGT { 33316 break 33317 } 33318 v.reset(OpAMD64MOVLconst) 33319 v.AuxInt = 1 33320 return true 33321 } 33322 return 
false 33323 } 33324 func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { 33325 b := v.Block 33326 _ = b 33327 // match: (SHLL x (MOVQconst [c])) 33328 // cond: 33329 // result: (SHLLconst [c&31] x) 33330 for { 33331 x := v.Args[0] 33332 v_1 := v.Args[1] 33333 if v_1.Op != OpAMD64MOVQconst { 33334 break 33335 } 33336 c := v_1.AuxInt 33337 v.reset(OpAMD64SHLLconst) 33338 v.AuxInt = c & 31 33339 v.AddArg(x) 33340 return true 33341 } 33342 // match: (SHLL x (MOVLconst [c])) 33343 // cond: 33344 // result: (SHLLconst [c&31] x) 33345 for { 33346 x := v.Args[0] 33347 v_1 := v.Args[1] 33348 if v_1.Op != OpAMD64MOVLconst { 33349 break 33350 } 33351 c := v_1.AuxInt 33352 v.reset(OpAMD64SHLLconst) 33353 v.AuxInt = c & 31 33354 v.AddArg(x) 33355 return true 33356 } 33357 // match: (SHLL x (ADDQconst [c] y)) 33358 // cond: c & 31 == 0 33359 // result: (SHLL x y) 33360 for { 33361 x := v.Args[0] 33362 v_1 := v.Args[1] 33363 if v_1.Op != OpAMD64ADDQconst { 33364 break 33365 } 33366 c := v_1.AuxInt 33367 y := v_1.Args[0] 33368 if !(c&31 == 0) { 33369 break 33370 } 33371 v.reset(OpAMD64SHLL) 33372 v.AddArg(x) 33373 v.AddArg(y) 33374 return true 33375 } 33376 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y))) 33377 // cond: c & 31 == 0 33378 // result: (SHLL x (NEGQ <t> y)) 33379 for { 33380 x := v.Args[0] 33381 v_1 := v.Args[1] 33382 if v_1.Op != OpAMD64NEGQ { 33383 break 33384 } 33385 t := v_1.Type 33386 v_1_0 := v_1.Args[0] 33387 if v_1_0.Op != OpAMD64ADDQconst { 33388 break 33389 } 33390 c := v_1_0.AuxInt 33391 y := v_1_0.Args[0] 33392 if !(c&31 == 0) { 33393 break 33394 } 33395 v.reset(OpAMD64SHLL) 33396 v.AddArg(x) 33397 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33398 v0.AddArg(y) 33399 v.AddArg(v0) 33400 return true 33401 } 33402 // match: (SHLL x (ANDQconst [c] y)) 33403 // cond: c & 31 == 31 33404 // result: (SHLL x y) 33405 for { 33406 x := v.Args[0] 33407 v_1 := v.Args[1] 33408 if v_1.Op != OpAMD64ANDQconst { 33409 break 33410 } 33411 c := v_1.AuxInt 33412 y := v_1.Args[0] 33413 if !(c&31 == 31) { 33414 break 33415 } 33416 v.reset(OpAMD64SHLL) 33417 v.AddArg(x) 33418 v.AddArg(y) 33419 return true 33420 } 33421 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y))) 33422 // cond: c & 31 == 31 33423 // result: (SHLL x (NEGQ <t> y)) 33424 for { 33425 x := v.Args[0] 33426 v_1 := v.Args[1] 33427 if v_1.Op != OpAMD64NEGQ { 33428 break 33429 } 33430 t := v_1.Type 33431 v_1_0 := v_1.Args[0] 33432 if v_1_0.Op != OpAMD64ANDQconst { 33433 break 33434 } 33435 c := v_1_0.AuxInt 33436 y := v_1_0.Args[0] 33437 if !(c&31 == 31) { 33438 break 33439 } 33440 v.reset(OpAMD64SHLL) 33441 v.AddArg(x) 33442 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33443 v0.AddArg(y) 33444 v.AddArg(v0) 33445 return true 33446 } 33447 // match: (SHLL x (ADDLconst [c] y)) 33448 // cond: c & 31 == 0 33449 // result: (SHLL x y) 33450 for { 33451 x := v.Args[0] 33452 v_1 := v.Args[1] 33453 if v_1.Op != OpAMD64ADDLconst { 33454 break 33455 } 33456 c := v_1.AuxInt 33457 y := v_1.Args[0] 33458 if !(c&31 == 0) { 33459 break 33460 } 33461 v.reset(OpAMD64SHLL) 33462 v.AddArg(x) 33463 v.AddArg(y) 33464 return true 33465 } 33466 // match: (SHLL x (NEGL <t> (ADDLconst [c] y))) 33467 // cond: c & 31 == 0 33468 // result: (SHLL x (NEGL <t> y)) 33469 for { 33470 x := v.Args[0] 33471 v_1 := v.Args[1] 33472 if v_1.Op != OpAMD64NEGL { 33473 break 33474 } 33475 t := v_1.Type 33476 v_1_0 := v_1.Args[0] 33477 if v_1_0.Op != OpAMD64ADDLconst { 33478 break 33479 } 33480 c := v_1_0.AuxInt 33481 y := v_1_0.Args[0] 33482 if !(c&31 == 0) { 33483 break 33484 } 33485 
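// Editorial note (annotation, not generated from AMD64.rules): SHLL only
// consumes the low 5 bits of its count, so a count term that is 0 mod 32 can
// be dropped. Here the count is NEGL(y+c) with c&31 == 0, and -(y+c) == -y
// (mod 32), so shifting by (NEGL (ADDLconst [c] y)) equals shifting by
// (NEGL y). Concretely, with c = 32 and y = 3: -(3+32) mod 32 = 29, the
// same effective count that NEGL(3) produces.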
v.reset(OpAMD64SHLL) 33486 v.AddArg(x) 33487 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 33488 v0.AddArg(y) 33489 v.AddArg(v0) 33490 return true 33491 } 33492 // match: (SHLL x (ANDLconst [c] y)) 33493 // cond: c & 31 == 31 33494 // result: (SHLL x y) 33495 for { 33496 x := v.Args[0] 33497 v_1 := v.Args[1] 33498 if v_1.Op != OpAMD64ANDLconst { 33499 break 33500 } 33501 c := v_1.AuxInt 33502 y := v_1.Args[0] 33503 if !(c&31 == 31) { 33504 break 33505 } 33506 v.reset(OpAMD64SHLL) 33507 v.AddArg(x) 33508 v.AddArg(y) 33509 return true 33510 } 33511 // match: (SHLL x (NEGL <t> (ANDLconst [c] y))) 33512 // cond: c & 31 == 31 33513 // result: (SHLL x (NEGL <t> y)) 33514 for { 33515 x := v.Args[0] 33516 v_1 := v.Args[1] 33517 if v_1.Op != OpAMD64NEGL { 33518 break 33519 } 33520 t := v_1.Type 33521 v_1_0 := v_1.Args[0] 33522 if v_1_0.Op != OpAMD64ANDLconst { 33523 break 33524 } 33525 c := v_1_0.AuxInt 33526 y := v_1_0.Args[0] 33527 if !(c&31 == 31) { 33528 break 33529 } 33530 v.reset(OpAMD64SHLL) 33531 v.AddArg(x) 33532 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 33533 v0.AddArg(y) 33534 v.AddArg(v0) 33535 return true 33536 } 33537 return false 33538 } 33539 func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { 33540 // match: (SHLLconst x [0]) 33541 // cond: 33542 // result: x 33543 for { 33544 if v.AuxInt != 0 { 33545 break 33546 } 33547 x := v.Args[0] 33548 v.reset(OpCopy) 33549 v.Type = x.Type 33550 v.AddArg(x) 33551 return true 33552 } 33553 return false 33554 } 33555 func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { 33556 b := v.Block 33557 _ = b 33558 // match: (SHLQ x (MOVQconst [c])) 33559 // cond: 33560 // result: (SHLQconst [c&63] x) 33561 for { 33562 x := v.Args[0] 33563 v_1 := v.Args[1] 33564 if v_1.Op != OpAMD64MOVQconst { 33565 break 33566 } 33567 c := v_1.AuxInt 33568 v.reset(OpAMD64SHLQconst) 33569 v.AuxInt = c & 63 33570 v.AddArg(x) 33571 return true 33572 } 33573 // match: (SHLQ x (MOVLconst [c])) 33574 // cond: 33575 // result: (SHLQconst [c&63] x) 33576 for { 33577 x := v.Args[0] 33578 v_1 := v.Args[1] 33579 if v_1.Op != OpAMD64MOVLconst { 33580 break 33581 } 33582 c := v_1.AuxInt 33583 v.reset(OpAMD64SHLQconst) 33584 v.AuxInt = c & 63 33585 v.AddArg(x) 33586 return true 33587 } 33588 // match: (SHLQ x (ADDQconst [c] y)) 33589 // cond: c & 63 == 0 33590 // result: (SHLQ x y) 33591 for { 33592 x := v.Args[0] 33593 v_1 := v.Args[1] 33594 if v_1.Op != OpAMD64ADDQconst { 33595 break 33596 } 33597 c := v_1.AuxInt 33598 y := v_1.Args[0] 33599 if !(c&63 == 0) { 33600 break 33601 } 33602 v.reset(OpAMD64SHLQ) 33603 v.AddArg(x) 33604 v.AddArg(y) 33605 return true 33606 } 33607 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y))) 33608 // cond: c & 63 == 0 33609 // result: (SHLQ x (NEGQ <t> y)) 33610 for { 33611 x := v.Args[0] 33612 v_1 := v.Args[1] 33613 if v_1.Op != OpAMD64NEGQ { 33614 break 33615 } 33616 t := v_1.Type 33617 v_1_0 := v_1.Args[0] 33618 if v_1_0.Op != OpAMD64ADDQconst { 33619 break 33620 } 33621 c := v_1_0.AuxInt 33622 y := v_1_0.Args[0] 33623 if !(c&63 == 0) { 33624 break 33625 } 33626 v.reset(OpAMD64SHLQ) 33627 v.AddArg(x) 33628 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33629 v0.AddArg(y) 33630 v.AddArg(v0) 33631 return true 33632 } 33633 // match: (SHLQ x (ANDQconst [c] y)) 33634 // cond: c & 63 == 63 33635 // result: (SHLQ x y) 33636 for { 33637 x := v.Args[0] 33638 v_1 := v.Args[1] 33639 if v_1.Op != OpAMD64ANDQconst { 33640 break 33641 } 33642 c := v_1.AuxInt 33643 y := v_1.Args[0] 33644 if !(c&63 == 63) { 33645 break 33646 } 33647 v.reset(OpAMD64SHLQ) 33648 
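// Editorial note (annotation): the CPU masks a 64-bit variable shift count to
// its low 6 bits, so ANDing the count with a constant whose low 6 bits are
// all set (c&63 == 63) cannot change the effective count; the ANDQconst is
// therefore dropped and x is shifted directly by y.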
v.AddArg(x) 33649 v.AddArg(y) 33650 return true 33651 } 33652 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y))) 33653 // cond: c & 63 == 63 33654 // result: (SHLQ x (NEGQ <t> y)) 33655 for { 33656 x := v.Args[0] 33657 v_1 := v.Args[1] 33658 if v_1.Op != OpAMD64NEGQ { 33659 break 33660 } 33661 t := v_1.Type 33662 v_1_0 := v_1.Args[0] 33663 if v_1_0.Op != OpAMD64ANDQconst { 33664 break 33665 } 33666 c := v_1_0.AuxInt 33667 y := v_1_0.Args[0] 33668 if !(c&63 == 63) { 33669 break 33670 } 33671 v.reset(OpAMD64SHLQ) 33672 v.AddArg(x) 33673 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33674 v0.AddArg(y) 33675 v.AddArg(v0) 33676 return true 33677 } 33678 // match: (SHLQ x (ADDLconst [c] y)) 33679 // cond: c & 63 == 0 33680 // result: (SHLQ x y) 33681 for { 33682 x := v.Args[0] 33683 v_1 := v.Args[1] 33684 if v_1.Op != OpAMD64ADDLconst { 33685 break 33686 } 33687 c := v_1.AuxInt 33688 y := v_1.Args[0] 33689 if !(c&63 == 0) { 33690 break 33691 } 33692 v.reset(OpAMD64SHLQ) 33693 v.AddArg(x) 33694 v.AddArg(y) 33695 return true 33696 } 33697 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y))) 33698 // cond: c & 63 == 0 33699 // result: (SHLQ x (NEGL <t> y)) 33700 for { 33701 x := v.Args[0] 33702 v_1 := v.Args[1] 33703 if v_1.Op != OpAMD64NEGL { 33704 break 33705 } 33706 t := v_1.Type 33707 v_1_0 := v_1.Args[0] 33708 if v_1_0.Op != OpAMD64ADDLconst { 33709 break 33710 } 33711 c := v_1_0.AuxInt 33712 y := v_1_0.Args[0] 33713 if !(c&63 == 0) { 33714 break 33715 } 33716 v.reset(OpAMD64SHLQ) 33717 v.AddArg(x) 33718 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 33719 v0.AddArg(y) 33720 v.AddArg(v0) 33721 return true 33722 } 33723 // match: (SHLQ x (ANDLconst [c] y)) 33724 // cond: c & 63 == 63 33725 // result: (SHLQ x y) 33726 for { 33727 x := v.Args[0] 33728 v_1 := v.Args[1] 33729 if v_1.Op != OpAMD64ANDLconst { 33730 break 33731 } 33732 c := v_1.AuxInt 33733 y := v_1.Args[0] 33734 if !(c&63 == 63) { 33735 break 33736 } 33737 v.reset(OpAMD64SHLQ) 33738 v.AddArg(x) 33739 v.AddArg(y) 33740 return true 33741 } 33742 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y))) 33743 // cond: c & 63 == 63 33744 // result: (SHLQ x (NEGL <t> y)) 33745 for { 33746 x := v.Args[0] 33747 v_1 := v.Args[1] 33748 if v_1.Op != OpAMD64NEGL { 33749 break 33750 } 33751 t := v_1.Type 33752 v_1_0 := v_1.Args[0] 33753 if v_1_0.Op != OpAMD64ANDLconst { 33754 break 33755 } 33756 c := v_1_0.AuxInt 33757 y := v_1_0.Args[0] 33758 if !(c&63 == 63) { 33759 break 33760 } 33761 v.reset(OpAMD64SHLQ) 33762 v.AddArg(x) 33763 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 33764 v0.AddArg(y) 33765 v.AddArg(v0) 33766 return true 33767 } 33768 return false 33769 } 33770 func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { 33771 // match: (SHLQconst x [0]) 33772 // cond: 33773 // result: x 33774 for { 33775 if v.AuxInt != 0 { 33776 break 33777 } 33778 x := v.Args[0] 33779 v.reset(OpCopy) 33780 v.Type = x.Type 33781 v.AddArg(x) 33782 return true 33783 } 33784 return false 33785 } 33786 func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { 33787 // match: (SHRB x (MOVQconst [c])) 33788 // cond: c&31 < 8 33789 // result: (SHRBconst [c&31] x) 33790 for { 33791 x := v.Args[0] 33792 v_1 := v.Args[1] 33793 if v_1.Op != OpAMD64MOVQconst { 33794 break 33795 } 33796 c := v_1.AuxInt 33797 if !(c&31 < 8) { 33798 break 33799 } 33800 v.reset(OpAMD64SHRBconst) 33801 v.AuxInt = c & 31 33802 v.AddArg(x) 33803 return true 33804 } 33805 // match: (SHRB x (MOVLconst [c])) 33806 // cond: c&31 < 8 33807 // result: (SHRBconst [c&31] x) 33808 for { 33809 x := v.Args[0] 33810 v_1 := v.Args[1] 
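// Editorial note (annotation): byte shifts carry an extra guard that the
// wider shifts above do not need. A count of 8 or more (after the 5-bit
// mask) clears every bit of a byte, so only c&31 < 8 becomes SHRBconst; the
// c&31 >= 8 rules below fold the whole expression to (MOVLconst [0]).
// For example, (SHRB x (MOVLconst [3])) -> (SHRBconst [3] x), while
// (SHRB x (MOVLconst [9])) -> (MOVLconst [0]).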
33811 if v_1.Op != OpAMD64MOVLconst { 33812 break 33813 } 33814 c := v_1.AuxInt 33815 if !(c&31 < 8) { 33816 break 33817 } 33818 v.reset(OpAMD64SHRBconst) 33819 v.AuxInt = c & 31 33820 v.AddArg(x) 33821 return true 33822 } 33823 // match: (SHRB _ (MOVQconst [c])) 33824 // cond: c&31 >= 8 33825 // result: (MOVLconst [0]) 33826 for { 33827 v_1 := v.Args[1] 33828 if v_1.Op != OpAMD64MOVQconst { 33829 break 33830 } 33831 c := v_1.AuxInt 33832 if !(c&31 >= 8) { 33833 break 33834 } 33835 v.reset(OpAMD64MOVLconst) 33836 v.AuxInt = 0 33837 return true 33838 } 33839 // match: (SHRB _ (MOVLconst [c])) 33840 // cond: c&31 >= 8 33841 // result: (MOVLconst [0]) 33842 for { 33843 v_1 := v.Args[1] 33844 if v_1.Op != OpAMD64MOVLconst { 33845 break 33846 } 33847 c := v_1.AuxInt 33848 if !(c&31 >= 8) { 33849 break 33850 } 33851 v.reset(OpAMD64MOVLconst) 33852 v.AuxInt = 0 33853 return true 33854 } 33855 return false 33856 } 33857 func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { 33858 // match: (SHRBconst x [0]) 33859 // cond: 33860 // result: x 33861 for { 33862 if v.AuxInt != 0 { 33863 break 33864 } 33865 x := v.Args[0] 33866 v.reset(OpCopy) 33867 v.Type = x.Type 33868 v.AddArg(x) 33869 return true 33870 } 33871 return false 33872 } 33873 func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { 33874 b := v.Block 33875 _ = b 33876 // match: (SHRL x (MOVQconst [c])) 33877 // cond: 33878 // result: (SHRLconst [c&31] x) 33879 for { 33880 x := v.Args[0] 33881 v_1 := v.Args[1] 33882 if v_1.Op != OpAMD64MOVQconst { 33883 break 33884 } 33885 c := v_1.AuxInt 33886 v.reset(OpAMD64SHRLconst) 33887 v.AuxInt = c & 31 33888 v.AddArg(x) 33889 return true 33890 } 33891 // match: (SHRL x (MOVLconst [c])) 33892 // cond: 33893 // result: (SHRLconst [c&31] x) 33894 for { 33895 x := v.Args[0] 33896 v_1 := v.Args[1] 33897 if v_1.Op != OpAMD64MOVLconst { 33898 break 33899 } 33900 c := v_1.AuxInt 33901 v.reset(OpAMD64SHRLconst) 33902 v.AuxInt = c & 31 33903 v.AddArg(x) 33904 return true 33905 } 33906 // match: (SHRL x (ADDQconst [c] y)) 33907 // cond: c & 31 == 0 33908 // result: (SHRL x y) 33909 for { 33910 x := v.Args[0] 33911 v_1 := v.Args[1] 33912 if v_1.Op != OpAMD64ADDQconst { 33913 break 33914 } 33915 c := v_1.AuxInt 33916 y := v_1.Args[0] 33917 if !(c&31 == 0) { 33918 break 33919 } 33920 v.reset(OpAMD64SHRL) 33921 v.AddArg(x) 33922 v.AddArg(y) 33923 return true 33924 } 33925 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y))) 33926 // cond: c & 31 == 0 33927 // result: (SHRL x (NEGQ <t> y)) 33928 for { 33929 x := v.Args[0] 33930 v_1 := v.Args[1] 33931 if v_1.Op != OpAMD64NEGQ { 33932 break 33933 } 33934 t := v_1.Type 33935 v_1_0 := v_1.Args[0] 33936 if v_1_0.Op != OpAMD64ADDQconst { 33937 break 33938 } 33939 c := v_1_0.AuxInt 33940 y := v_1_0.Args[0] 33941 if !(c&31 == 0) { 33942 break 33943 } 33944 v.reset(OpAMD64SHRL) 33945 v.AddArg(x) 33946 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33947 v0.AddArg(y) 33948 v.AddArg(v0) 33949 return true 33950 } 33951 // match: (SHRL x (ANDQconst [c] y)) 33952 // cond: c & 31 == 31 33953 // result: (SHRL x y) 33954 for { 33955 x := v.Args[0] 33956 v_1 := v.Args[1] 33957 if v_1.Op != OpAMD64ANDQconst { 33958 break 33959 } 33960 c := v_1.AuxInt 33961 y := v_1.Args[0] 33962 if !(c&31 == 31) { 33963 break 33964 } 33965 v.reset(OpAMD64SHRL) 33966 v.AddArg(x) 33967 v.AddArg(y) 33968 return true 33969 } 33970 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y))) 33971 // cond: c & 31 == 31 33972 // result: (SHRL x (NEGQ <t> y)) 33973 for { 33974 x := v.Args[0] 33975 v_1 := v.Args[1] 33976 if 
v_1.Op != OpAMD64NEGQ { 33977 break 33978 } 33979 t := v_1.Type 33980 v_1_0 := v_1.Args[0] 33981 if v_1_0.Op != OpAMD64ANDQconst { 33982 break 33983 } 33984 c := v_1_0.AuxInt 33985 y := v_1_0.Args[0] 33986 if !(c&31 == 31) { 33987 break 33988 } 33989 v.reset(OpAMD64SHRL) 33990 v.AddArg(x) 33991 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33992 v0.AddArg(y) 33993 v.AddArg(v0) 33994 return true 33995 } 33996 // match: (SHRL x (ADDLconst [c] y)) 33997 // cond: c & 31 == 0 33998 // result: (SHRL x y) 33999 for { 34000 x := v.Args[0] 34001 v_1 := v.Args[1] 34002 if v_1.Op != OpAMD64ADDLconst { 34003 break 34004 } 34005 c := v_1.AuxInt 34006 y := v_1.Args[0] 34007 if !(c&31 == 0) { 34008 break 34009 } 34010 v.reset(OpAMD64SHRL) 34011 v.AddArg(x) 34012 v.AddArg(y) 34013 return true 34014 } 34015 // match: (SHRL x (NEGL <t> (ADDLconst [c] y))) 34016 // cond: c & 31 == 0 34017 // result: (SHRL x (NEGL <t> y)) 34018 for { 34019 x := v.Args[0] 34020 v_1 := v.Args[1] 34021 if v_1.Op != OpAMD64NEGL { 34022 break 34023 } 34024 t := v_1.Type 34025 v_1_0 := v_1.Args[0] 34026 if v_1_0.Op != OpAMD64ADDLconst { 34027 break 34028 } 34029 c := v_1_0.AuxInt 34030 y := v_1_0.Args[0] 34031 if !(c&31 == 0) { 34032 break 34033 } 34034 v.reset(OpAMD64SHRL) 34035 v.AddArg(x) 34036 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 34037 v0.AddArg(y) 34038 v.AddArg(v0) 34039 return true 34040 } 34041 // match: (SHRL x (ANDLconst [c] y)) 34042 // cond: c & 31 == 31 34043 // result: (SHRL x y) 34044 for { 34045 x := v.Args[0] 34046 v_1 := v.Args[1] 34047 if v_1.Op != OpAMD64ANDLconst { 34048 break 34049 } 34050 c := v_1.AuxInt 34051 y := v_1.Args[0] 34052 if !(c&31 == 31) { 34053 break 34054 } 34055 v.reset(OpAMD64SHRL) 34056 v.AddArg(x) 34057 v.AddArg(y) 34058 return true 34059 } 34060 // match: (SHRL x (NEGL <t> (ANDLconst [c] y))) 34061 // cond: c & 31 == 31 34062 // result: (SHRL x (NEGL <t> y)) 34063 for { 34064 x := v.Args[0] 34065 v_1 := v.Args[1] 34066 if v_1.Op != OpAMD64NEGL { 34067 break 34068 } 34069 t := v_1.Type 34070 v_1_0 := v_1.Args[0] 34071 if v_1_0.Op != OpAMD64ANDLconst { 34072 break 34073 } 34074 c := v_1_0.AuxInt 34075 y := v_1_0.Args[0] 34076 if !(c&31 == 31) { 34077 break 34078 } 34079 v.reset(OpAMD64SHRL) 34080 v.AddArg(x) 34081 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 34082 v0.AddArg(y) 34083 v.AddArg(v0) 34084 return true 34085 } 34086 return false 34087 } 34088 func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { 34089 // match: (SHRLconst x [0]) 34090 // cond: 34091 // result: x 34092 for { 34093 if v.AuxInt != 0 { 34094 break 34095 } 34096 x := v.Args[0] 34097 v.reset(OpCopy) 34098 v.Type = x.Type 34099 v.AddArg(x) 34100 return true 34101 } 34102 return false 34103 } 34104 func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { 34105 b := v.Block 34106 _ = b 34107 // match: (SHRQ x (MOVQconst [c])) 34108 // cond: 34109 // result: (SHRQconst [c&63] x) 34110 for { 34111 x := v.Args[0] 34112 v_1 := v.Args[1] 34113 if v_1.Op != OpAMD64MOVQconst { 34114 break 34115 } 34116 c := v_1.AuxInt 34117 v.reset(OpAMD64SHRQconst) 34118 v.AuxInt = c & 63 34119 v.AddArg(x) 34120 return true 34121 } 34122 // match: (SHRQ x (MOVLconst [c])) 34123 // cond: 34124 // result: (SHRQconst [c&63] x) 34125 for { 34126 x := v.Args[0] 34127 v_1 := v.Args[1] 34128 if v_1.Op != OpAMD64MOVLconst { 34129 break 34130 } 34131 c := v_1.AuxInt 34132 v.reset(OpAMD64SHRQconst) 34133 v.AuxInt = c & 63 34134 v.AddArg(x) 34135 return true 34136 } 34137 // match: (SHRQ x (ADDQconst [c] y)) 34138 // cond: c & 63 == 0 34139 // result: (SHRQ x 
y) 34140 for { 34141 x := v.Args[0] 34142 v_1 := v.Args[1] 34143 if v_1.Op != OpAMD64ADDQconst { 34144 break 34145 } 34146 c := v_1.AuxInt 34147 y := v_1.Args[0] 34148 if !(c&63 == 0) { 34149 break 34150 } 34151 v.reset(OpAMD64SHRQ) 34152 v.AddArg(x) 34153 v.AddArg(y) 34154 return true 34155 } 34156 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y))) 34157 // cond: c & 63 == 0 34158 // result: (SHRQ x (NEGQ <t> y)) 34159 for { 34160 x := v.Args[0] 34161 v_1 := v.Args[1] 34162 if v_1.Op != OpAMD64NEGQ { 34163 break 34164 } 34165 t := v_1.Type 34166 v_1_0 := v_1.Args[0] 34167 if v_1_0.Op != OpAMD64ADDQconst { 34168 break 34169 } 34170 c := v_1_0.AuxInt 34171 y := v_1_0.Args[0] 34172 if !(c&63 == 0) { 34173 break 34174 } 34175 v.reset(OpAMD64SHRQ) 34176 v.AddArg(x) 34177 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 34178 v0.AddArg(y) 34179 v.AddArg(v0) 34180 return true 34181 } 34182 // match: (SHRQ x (ANDQconst [c] y)) 34183 // cond: c & 63 == 63 34184 // result: (SHRQ x y) 34185 for { 34186 x := v.Args[0] 34187 v_1 := v.Args[1] 34188 if v_1.Op != OpAMD64ANDQconst { 34189 break 34190 } 34191 c := v_1.AuxInt 34192 y := v_1.Args[0] 34193 if !(c&63 == 63) { 34194 break 34195 } 34196 v.reset(OpAMD64SHRQ) 34197 v.AddArg(x) 34198 v.AddArg(y) 34199 return true 34200 } 34201 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y))) 34202 // cond: c & 63 == 63 34203 // result: (SHRQ x (NEGQ <t> y)) 34204 for { 34205 x := v.Args[0] 34206 v_1 := v.Args[1] 34207 if v_1.Op != OpAMD64NEGQ { 34208 break 34209 } 34210 t := v_1.Type 34211 v_1_0 := v_1.Args[0] 34212 if v_1_0.Op != OpAMD64ANDQconst { 34213 break 34214 } 34215 c := v_1_0.AuxInt 34216 y := v_1_0.Args[0] 34217 if !(c&63 == 63) { 34218 break 34219 } 34220 v.reset(OpAMD64SHRQ) 34221 v.AddArg(x) 34222 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 34223 v0.AddArg(y) 34224 v.AddArg(v0) 34225 return true 34226 } 34227 // match: (SHRQ x (ADDLconst [c] y)) 34228 // cond: c & 63 == 0 34229 // result: (SHRQ x y) 34230 for { 34231 x := v.Args[0] 34232 v_1 := v.Args[1] 34233 if v_1.Op != OpAMD64ADDLconst { 34234 break 34235 } 34236 c := v_1.AuxInt 34237 y := v_1.Args[0] 34238 if !(c&63 == 0) { 34239 break 34240 } 34241 v.reset(OpAMD64SHRQ) 34242 v.AddArg(x) 34243 v.AddArg(y) 34244 return true 34245 } 34246 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y))) 34247 // cond: c & 63 == 0 34248 // result: (SHRQ x (NEGL <t> y)) 34249 for { 34250 x := v.Args[0] 34251 v_1 := v.Args[1] 34252 if v_1.Op != OpAMD64NEGL { 34253 break 34254 } 34255 t := v_1.Type 34256 v_1_0 := v_1.Args[0] 34257 if v_1_0.Op != OpAMD64ADDLconst { 34258 break 34259 } 34260 c := v_1_0.AuxInt 34261 y := v_1_0.Args[0] 34262 if !(c&63 == 0) { 34263 break 34264 } 34265 v.reset(OpAMD64SHRQ) 34266 v.AddArg(x) 34267 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 34268 v0.AddArg(y) 34269 v.AddArg(v0) 34270 return true 34271 } 34272 // match: (SHRQ x (ANDLconst [c] y)) 34273 // cond: c & 63 == 63 34274 // result: (SHRQ x y) 34275 for { 34276 x := v.Args[0] 34277 v_1 := v.Args[1] 34278 if v_1.Op != OpAMD64ANDLconst { 34279 break 34280 } 34281 c := v_1.AuxInt 34282 y := v_1.Args[0] 34283 if !(c&63 == 63) { 34284 break 34285 } 34286 v.reset(OpAMD64SHRQ) 34287 v.AddArg(x) 34288 v.AddArg(y) 34289 return true 34290 } 34291 // match: (SHRQ x (NEGL <t> (ANDLconst [c] y))) 34292 // cond: c & 63 == 63 34293 // result: (SHRQ x (NEGL <t> y)) 34294 for { 34295 x := v.Args[0] 34296 v_1 := v.Args[1] 34297 if v_1.Op != OpAMD64NEGL { 34298 break 34299 } 34300 t := v_1.Type 34301 v_1_0 := v_1.Args[0] 34302 if v_1_0.Op != OpAMD64ANDLconst { 34303 break 
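// Editorial note (annotation): this rule relies on the same 6-bit count
// masking as the SHLQ cases earlier. Since c's low 6 bits are all ones
// (c&63 == 63), y&c == y (mod 64), hence -(y&c) == -y (mod 64), and
// (NEGL (ANDLconst [c] y)) shifts exactly like (NEGL y).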
34304 } 34305 c := v_1_0.AuxInt 34306 y := v_1_0.Args[0] 34307 if !(c&63 == 63) { 34308 break 34309 } 34310 v.reset(OpAMD64SHRQ) 34311 v.AddArg(x) 34312 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 34313 v0.AddArg(y) 34314 v.AddArg(v0) 34315 return true 34316 } 34317 return false 34318 } 34319 func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { 34320 // match: (SHRQconst x [0]) 34321 // cond: 34322 // result: x 34323 for { 34324 if v.AuxInt != 0 { 34325 break 34326 } 34327 x := v.Args[0] 34328 v.reset(OpCopy) 34329 v.Type = x.Type 34330 v.AddArg(x) 34331 return true 34332 } 34333 return false 34334 } 34335 func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { 34336 // match: (SHRW x (MOVQconst [c])) 34337 // cond: c&31 < 16 34338 // result: (SHRWconst [c&31] x) 34339 for { 34340 x := v.Args[0] 34341 v_1 := v.Args[1] 34342 if v_1.Op != OpAMD64MOVQconst { 34343 break 34344 } 34345 c := v_1.AuxInt 34346 if !(c&31 < 16) { 34347 break 34348 } 34349 v.reset(OpAMD64SHRWconst) 34350 v.AuxInt = c & 31 34351 v.AddArg(x) 34352 return true 34353 } 34354 // match: (SHRW x (MOVLconst [c])) 34355 // cond: c&31 < 16 34356 // result: (SHRWconst [c&31] x) 34357 for { 34358 x := v.Args[0] 34359 v_1 := v.Args[1] 34360 if v_1.Op != OpAMD64MOVLconst { 34361 break 34362 } 34363 c := v_1.AuxInt 34364 if !(c&31 < 16) { 34365 break 34366 } 34367 v.reset(OpAMD64SHRWconst) 34368 v.AuxInt = c & 31 34369 v.AddArg(x) 34370 return true 34371 } 34372 // match: (SHRW _ (MOVQconst [c])) 34373 // cond: c&31 >= 16 34374 // result: (MOVLconst [0]) 34375 for { 34376 v_1 := v.Args[1] 34377 if v_1.Op != OpAMD64MOVQconst { 34378 break 34379 } 34380 c := v_1.AuxInt 34381 if !(c&31 >= 16) { 34382 break 34383 } 34384 v.reset(OpAMD64MOVLconst) 34385 v.AuxInt = 0 34386 return true 34387 } 34388 // match: (SHRW _ (MOVLconst [c])) 34389 // cond: c&31 >= 16 34390 // result: (MOVLconst [0]) 34391 for { 34392 v_1 := v.Args[1] 34393 if v_1.Op != OpAMD64MOVLconst { 34394 break 34395 } 34396 c := v_1.AuxInt 34397 if !(c&31 >= 16) { 34398 break 34399 } 34400 v.reset(OpAMD64MOVLconst) 34401 v.AuxInt = 0 34402 return true 34403 } 34404 return false 34405 } 34406 func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { 34407 // match: (SHRWconst x [0]) 34408 // cond: 34409 // result: x 34410 for { 34411 if v.AuxInt != 0 { 34412 break 34413 } 34414 x := v.Args[0] 34415 v.reset(OpCopy) 34416 v.Type = x.Type 34417 v.AddArg(x) 34418 return true 34419 } 34420 return false 34421 } 34422 func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { 34423 b := v.Block 34424 _ = b 34425 // match: (SUBL x (MOVLconst [c])) 34426 // cond: 34427 // result: (SUBLconst x [c]) 34428 for { 34429 x := v.Args[0] 34430 v_1 := v.Args[1] 34431 if v_1.Op != OpAMD64MOVLconst { 34432 break 34433 } 34434 c := v_1.AuxInt 34435 v.reset(OpAMD64SUBLconst) 34436 v.AuxInt = c 34437 v.AddArg(x) 34438 return true 34439 } 34440 // match: (SUBL (MOVLconst [c]) x) 34441 // cond: 34442 // result: (NEGL (SUBLconst <v.Type> x [c])) 34443 for { 34444 v_0 := v.Args[0] 34445 if v_0.Op != OpAMD64MOVLconst { 34446 break 34447 } 34448 c := v_0.AuxInt 34449 x := v.Args[1] 34450 v.reset(OpAMD64NEGL) 34451 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 34452 v0.AuxInt = c 34453 v0.AddArg(x) 34454 v.AddArg(v0) 34455 return true 34456 } 34457 // match: (SUBL x x) 34458 // cond: 34459 // result: (MOVLconst [0]) 34460 for { 34461 x := v.Args[0] 34462 if x != v.Args[1] { 34463 break 34464 } 34465 v.reset(OpAMD64MOVLconst) 34466 v.AuxInt = 0 34467 return true 34468 } 34469 // match: (SUBL x 
l:(MOVLload [off] {sym} ptr mem)) 34470 // cond: canMergeLoad(v, l, x) && clobber(l) 34471 // result: (SUBLmem x [off] {sym} ptr mem) 34472 for { 34473 x := v.Args[0] 34474 l := v.Args[1] 34475 if l.Op != OpAMD64MOVLload { 34476 break 34477 } 34478 off := l.AuxInt 34479 sym := l.Aux 34480 ptr := l.Args[0] 34481 mem := l.Args[1] 34482 if !(canMergeLoad(v, l, x) && clobber(l)) { 34483 break 34484 } 34485 v.reset(OpAMD64SUBLmem) 34486 v.AuxInt = off 34487 v.Aux = sym 34488 v.AddArg(x) 34489 v.AddArg(ptr) 34490 v.AddArg(mem) 34491 return true 34492 } 34493 return false 34494 } 34495 func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { 34496 // match: (SUBLconst [c] x) 34497 // cond: int32(c) == 0 34498 // result: x 34499 for { 34500 c := v.AuxInt 34501 x := v.Args[0] 34502 if !(int32(c) == 0) { 34503 break 34504 } 34505 v.reset(OpCopy) 34506 v.Type = x.Type 34507 v.AddArg(x) 34508 return true 34509 } 34510 // match: (SUBLconst [c] x) 34511 // cond: 34512 // result: (ADDLconst [int64(int32(-c))] x) 34513 for { 34514 c := v.AuxInt 34515 x := v.Args[0] 34516 v.reset(OpAMD64ADDLconst) 34517 v.AuxInt = int64(int32(-c)) 34518 v.AddArg(x) 34519 return true 34520 } 34521 } 34522 func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { 34523 b := v.Block 34524 _ = b 34525 // match: (SUBQ x (MOVQconst [c])) 34526 // cond: is32Bit(c) 34527 // result: (SUBQconst x [c]) 34528 for { 34529 x := v.Args[0] 34530 v_1 := v.Args[1] 34531 if v_1.Op != OpAMD64MOVQconst { 34532 break 34533 } 34534 c := v_1.AuxInt 34535 if !(is32Bit(c)) { 34536 break 34537 } 34538 v.reset(OpAMD64SUBQconst) 34539 v.AuxInt = c 34540 v.AddArg(x) 34541 return true 34542 } 34543 // match: (SUBQ (MOVQconst [c]) x) 34544 // cond: is32Bit(c) 34545 // result: (NEGQ (SUBQconst <v.Type> x [c])) 34546 for { 34547 v_0 := v.Args[0] 34548 if v_0.Op != OpAMD64MOVQconst { 34549 break 34550 } 34551 c := v_0.AuxInt 34552 x := v.Args[1] 34553 if !(is32Bit(c)) { 34554 break 34555 } 34556 v.reset(OpAMD64NEGQ) 34557 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 34558 v0.AuxInt = c 34559 v0.AddArg(x) 34560 v.AddArg(v0) 34561 return true 34562 } 34563 // match: (SUBQ x x) 34564 // cond: 34565 // result: (MOVQconst [0]) 34566 for { 34567 x := v.Args[0] 34568 if x != v.Args[1] { 34569 break 34570 } 34571 v.reset(OpAMD64MOVQconst) 34572 v.AuxInt = 0 34573 return true 34574 } 34575 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) 34576 // cond: canMergeLoad(v, l, x) && clobber(l) 34577 // result: (SUBQmem x [off] {sym} ptr mem) 34578 for { 34579 x := v.Args[0] 34580 l := v.Args[1] 34581 if l.Op != OpAMD64MOVQload { 34582 break 34583 } 34584 off := l.AuxInt 34585 sym := l.Aux 34586 ptr := l.Args[0] 34587 mem := l.Args[1] 34588 if !(canMergeLoad(v, l, x) && clobber(l)) { 34589 break 34590 } 34591 v.reset(OpAMD64SUBQmem) 34592 v.AuxInt = off 34593 v.Aux = sym 34594 v.AddArg(x) 34595 v.AddArg(ptr) 34596 v.AddArg(mem) 34597 return true 34598 } 34599 return false 34600 } 34601 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { 34602 // match: (SUBQconst [0] x) 34603 // cond: 34604 // result: x 34605 for { 34606 if v.AuxInt != 0 { 34607 break 34608 } 34609 x := v.Args[0] 34610 v.reset(OpCopy) 34611 v.Type = x.Type 34612 v.AddArg(x) 34613 return true 34614 } 34615 // match: (SUBQconst [c] x) 34616 // cond: c != -(1<<31) 34617 // result: (ADDQconst [-c] x) 34618 for { 34619 c := v.AuxInt 34620 x := v.Args[0] 34621 if !(c != -(1 << 31)) { 34622 break 34623 } 34624 v.reset(OpAMD64ADDQconst) 34625 v.AuxInt = -c 34626 v.AddArg(x) 34627 return true 34628 } 
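// Editorial note (annotation): the guard c != -(1<<31) on the rule above
// exists because the immediate must still fit a signed 32-bit value after
// negation. Negating -2147483648 yields +2147483648, which overflows int32,
// so that single value keeps its SUBQconst form instead of becoming
// (ADDQconst [-c] x).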
34629 // match: (SUBQconst (MOVQconst [d]) [c]) 34630 // cond: 34631 // result: (MOVQconst [d-c]) 34632 for { 34633 c := v.AuxInt 34634 v_0 := v.Args[0] 34635 if v_0.Op != OpAMD64MOVQconst { 34636 break 34637 } 34638 d := v_0.AuxInt 34639 v.reset(OpAMD64MOVQconst) 34640 v.AuxInt = d - c 34641 return true 34642 } 34643 // match: (SUBQconst (SUBQconst x [d]) [c]) 34644 // cond: is32Bit(-c-d) 34645 // result: (ADDQconst [-c-d] x) 34646 for { 34647 c := v.AuxInt 34648 v_0 := v.Args[0] 34649 if v_0.Op != OpAMD64SUBQconst { 34650 break 34651 } 34652 d := v_0.AuxInt 34653 x := v_0.Args[0] 34654 if !(is32Bit(-c - d)) { 34655 break 34656 } 34657 v.reset(OpAMD64ADDQconst) 34658 v.AuxInt = -c - d 34659 v.AddArg(x) 34660 return true 34661 } 34662 return false 34663 } 34664 func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { 34665 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) 34666 // cond: canMergeLoad(v, l, x) && clobber(l) 34667 // result: (SUBSDmem x [off] {sym} ptr mem) 34668 for { 34669 x := v.Args[0] 34670 l := v.Args[1] 34671 if l.Op != OpAMD64MOVSDload { 34672 break 34673 } 34674 off := l.AuxInt 34675 sym := l.Aux 34676 ptr := l.Args[0] 34677 mem := l.Args[1] 34678 if !(canMergeLoad(v, l, x) && clobber(l)) { 34679 break 34680 } 34681 v.reset(OpAMD64SUBSDmem) 34682 v.AuxInt = off 34683 v.Aux = sym 34684 v.AddArg(x) 34685 v.AddArg(ptr) 34686 v.AddArg(mem) 34687 return true 34688 } 34689 return false 34690 } 34691 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { 34692 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) 34693 // cond: canMergeLoad(v, l, x) && clobber(l) 34694 // result: (SUBSSmem x [off] {sym} ptr mem) 34695 for { 34696 x := v.Args[0] 34697 l := v.Args[1] 34698 if l.Op != OpAMD64MOVSSload { 34699 break 34700 } 34701 off := l.AuxInt 34702 sym := l.Aux 34703 ptr := l.Args[0] 34704 mem := l.Args[1] 34705 if !(canMergeLoad(v, l, x) && clobber(l)) { 34706 break 34707 } 34708 v.reset(OpAMD64SUBSSmem) 34709 v.AuxInt = off 34710 v.Aux = sym 34711 v.AddArg(x) 34712 v.AddArg(ptr) 34713 v.AddArg(mem) 34714 return true 34715 } 34716 return false 34717 } 34718 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { 34719 // match: (TESTB (MOVLconst [c]) x) 34720 // cond: 34721 // result: (TESTBconst [c] x) 34722 for { 34723 v_0 := v.Args[0] 34724 if v_0.Op != OpAMD64MOVLconst { 34725 break 34726 } 34727 c := v_0.AuxInt 34728 x := v.Args[1] 34729 v.reset(OpAMD64TESTBconst) 34730 v.AuxInt = c 34731 v.AddArg(x) 34732 return true 34733 } 34734 // match: (TESTB x (MOVLconst [c])) 34735 // cond: 34736 // result: (TESTBconst [c] x) 34737 for { 34738 x := v.Args[0] 34739 v_1 := v.Args[1] 34740 if v_1.Op != OpAMD64MOVLconst { 34741 break 34742 } 34743 c := v_1.AuxInt 34744 v.reset(OpAMD64TESTBconst) 34745 v.AuxInt = c 34746 v.AddArg(x) 34747 return true 34748 } 34749 return false 34750 } 34751 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { 34752 // match: (TESTL (MOVLconst [c]) x) 34753 // cond: 34754 // result: (TESTLconst [c] x) 34755 for { 34756 v_0 := v.Args[0] 34757 if v_0.Op != OpAMD64MOVLconst { 34758 break 34759 } 34760 c := v_0.AuxInt 34761 x := v.Args[1] 34762 v.reset(OpAMD64TESTLconst) 34763 v.AuxInt = c 34764 v.AddArg(x) 34765 return true 34766 } 34767 // match: (TESTL x (MOVLconst [c])) 34768 // cond: 34769 // result: (TESTLconst [c] x) 34770 for { 34771 x := v.Args[0] 34772 v_1 := v.Args[1] 34773 if v_1.Op != OpAMD64MOVLconst { 34774 break 34775 } 34776 c := v_1.AuxInt 34777 v.reset(OpAMD64TESTLconst) 34778 v.AuxInt = c 34779 v.AddArg(x) 34780 return true 34781 } 
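// Editorial note (annotation): TESTL is symmetric in its operands, so both
// argument orders are matched and normalized to the same canonical form,
// e.g. (TESTL (MOVLconst [7]) x) and (TESTL x (MOVLconst [7])) both become
// (TESTLconst [7] x). TESTQ below adds an is32Bit(c) guard because its
// constant must also fit a 32-bit immediate.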
34782 return false 34783 } 34784 func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { 34785 // match: (TESTQ (MOVQconst [c]) x) 34786 // cond: is32Bit(c) 34787 // result: (TESTQconst [c] x) 34788 for { 34789 v_0 := v.Args[0] 34790 if v_0.Op != OpAMD64MOVQconst { 34791 break 34792 } 34793 c := v_0.AuxInt 34794 x := v.Args[1] 34795 if !(is32Bit(c)) { 34796 break 34797 } 34798 v.reset(OpAMD64TESTQconst) 34799 v.AuxInt = c 34800 v.AddArg(x) 34801 return true 34802 } 34803 // match: (TESTQ x (MOVQconst [c])) 34804 // cond: is32Bit(c) 34805 // result: (TESTQconst [c] x) 34806 for { 34807 x := v.Args[0] 34808 v_1 := v.Args[1] 34809 if v_1.Op != OpAMD64MOVQconst { 34810 break 34811 } 34812 c := v_1.AuxInt 34813 if !(is32Bit(c)) { 34814 break 34815 } 34816 v.reset(OpAMD64TESTQconst) 34817 v.AuxInt = c 34818 v.AddArg(x) 34819 return true 34820 } 34821 return false 34822 } 34823 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { 34824 // match: (TESTW (MOVLconst [c]) x) 34825 // cond: 34826 // result: (TESTWconst [c] x) 34827 for { 34828 v_0 := v.Args[0] 34829 if v_0.Op != OpAMD64MOVLconst { 34830 break 34831 } 34832 c := v_0.AuxInt 34833 x := v.Args[1] 34834 v.reset(OpAMD64TESTWconst) 34835 v.AuxInt = c 34836 v.AddArg(x) 34837 return true 34838 } 34839 // match: (TESTW x (MOVLconst [c])) 34840 // cond: 34841 // result: (TESTWconst [c] x) 34842 for { 34843 x := v.Args[0] 34844 v_1 := v.Args[1] 34845 if v_1.Op != OpAMD64MOVLconst { 34846 break 34847 } 34848 c := v_1.AuxInt 34849 v.reset(OpAMD64TESTWconst) 34850 v.AuxInt = c 34851 v.AddArg(x) 34852 return true 34853 } 34854 return false 34855 } 34856 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { 34857 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 34858 // cond: is32Bit(off1+off2) 34859 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 34860 for { 34861 off1 := v.AuxInt 34862 sym := v.Aux 34863 val := v.Args[0] 34864 v_1 := v.Args[1] 34865 if v_1.Op != OpAMD64ADDQconst { 34866 break 34867 } 34868 off2 := v_1.AuxInt 34869 ptr := v_1.Args[0] 34870 mem := v.Args[2] 34871 if !(is32Bit(off1 + off2)) { 34872 break 34873 } 34874 v.reset(OpAMD64XADDLlock) 34875 v.AuxInt = off1 + off2 34876 v.Aux = sym 34877 v.AddArg(val) 34878 v.AddArg(ptr) 34879 v.AddArg(mem) 34880 return true 34881 } 34882 return false 34883 } 34884 func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { 34885 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 34886 // cond: is32Bit(off1+off2) 34887 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 34888 for { 34889 off1 := v.AuxInt 34890 sym := v.Aux 34891 val := v.Args[0] 34892 v_1 := v.Args[1] 34893 if v_1.Op != OpAMD64ADDQconst { 34894 break 34895 } 34896 off2 := v_1.AuxInt 34897 ptr := v_1.Args[0] 34898 mem := v.Args[2] 34899 if !(is32Bit(off1 + off2)) { 34900 break 34901 } 34902 v.reset(OpAMD64XADDQlock) 34903 v.AuxInt = off1 + off2 34904 v.Aux = sym 34905 v.AddArg(val) 34906 v.AddArg(ptr) 34907 v.AddArg(mem) 34908 return true 34909 } 34910 return false 34911 } 34912 func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { 34913 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 34914 // cond: is32Bit(off1+off2) 34915 // result: (XCHGL [off1+off2] {sym} val ptr mem) 34916 for { 34917 off1 := v.AuxInt 34918 sym := v.Aux 34919 val := v.Args[0] 34920 v_1 := v.Args[1] 34921 if v_1.Op != OpAMD64ADDQconst { 34922 break 34923 } 34924 off2 := v_1.AuxInt 34925 ptr := v_1.Args[0] 34926 mem := v.Args[2] 34927 if !(is32Bit(off1 + off2)) { 34928 break 34929 } 34930 
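// An AMD64 addressing displacement is a signed 32-bit field, so the
// folded offset is accepted only when is32Bit(off1+off2) holds. A
// minimal standalone sketch of that check (fits32 is an illustrative
// name, not the compiler's helper):
//
//	// fits32 reports whether n survives a round trip through int32.
//	func fits32(n int64) bool {
//		return n == int64(int32(n))
//	}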
v.reset(OpAMD64XCHGL) 34931 v.AuxInt = off1 + off2 34932 v.Aux = sym 34933 v.AddArg(val) 34934 v.AddArg(ptr) 34935 v.AddArg(mem) 34936 return true 34937 } 34938 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 34939 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 34940 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 34941 for { 34942 off1 := v.AuxInt 34943 sym1 := v.Aux 34944 val := v.Args[0] 34945 v_1 := v.Args[1] 34946 if v_1.Op != OpAMD64LEAQ { 34947 break 34948 } 34949 off2 := v_1.AuxInt 34950 sym2 := v_1.Aux 34951 ptr := v_1.Args[0] 34952 mem := v.Args[2] 34953 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 34954 break 34955 } 34956 v.reset(OpAMD64XCHGL) 34957 v.AuxInt = off1 + off2 34958 v.Aux = mergeSym(sym1, sym2) 34959 v.AddArg(val) 34960 v.AddArg(ptr) 34961 v.AddArg(mem) 34962 return true 34963 } 34964 return false 34965 } 34966 func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { 34967 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 34968 // cond: is32Bit(off1+off2) 34969 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 34970 for { 34971 off1 := v.AuxInt 34972 sym := v.Aux 34973 val := v.Args[0] 34974 v_1 := v.Args[1] 34975 if v_1.Op != OpAMD64ADDQconst { 34976 break 34977 } 34978 off2 := v_1.AuxInt 34979 ptr := v_1.Args[0] 34980 mem := v.Args[2] 34981 if !(is32Bit(off1 + off2)) { 34982 break 34983 } 34984 v.reset(OpAMD64XCHGQ) 34985 v.AuxInt = off1 + off2 34986 v.Aux = sym 34987 v.AddArg(val) 34988 v.AddArg(ptr) 34989 v.AddArg(mem) 34990 return true 34991 } 34992 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 34993 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 34994 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 34995 for { 34996 off1 := v.AuxInt 34997 sym1 := v.Aux 34998 val := v.Args[0] 34999 v_1 := v.Args[1] 35000 if v_1.Op != OpAMD64LEAQ { 35001 break 35002 } 35003 off2 := v_1.AuxInt 35004 sym2 := v_1.Aux 35005 ptr := v_1.Args[0] 35006 mem := v.Args[2] 35007 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 35008 break 35009 } 35010 v.reset(OpAMD64XCHGQ) 35011 v.AuxInt = off1 + off2 35012 v.Aux = mergeSym(sym1, sym2) 35013 v.AddArg(val) 35014 v.AddArg(ptr) 35015 v.AddArg(mem) 35016 return true 35017 } 35018 return false 35019 } 35020 func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { 35021 // match: (XORL x (MOVLconst [c])) 35022 // cond: 35023 // result: (XORLconst [c] x) 35024 for { 35025 x := v.Args[0] 35026 v_1 := v.Args[1] 35027 if v_1.Op != OpAMD64MOVLconst { 35028 break 35029 } 35030 c := v_1.AuxInt 35031 v.reset(OpAMD64XORLconst) 35032 v.AuxInt = c 35033 v.AddArg(x) 35034 return true 35035 } 35036 // match: (XORL (MOVLconst [c]) x) 35037 // cond: 35038 // result: (XORLconst [c] x) 35039 for { 35040 v_0 := v.Args[0] 35041 if v_0.Op != OpAMD64MOVLconst { 35042 break 35043 } 35044 c := v_0.AuxInt 35045 x := v.Args[1] 35046 v.reset(OpAMD64XORLconst) 35047 v.AuxInt = c 35048 v.AddArg(x) 35049 return true 35050 } 35051 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) 35052 // cond: d==32-c 35053 // result: (ROLLconst x [c]) 35054 for { 35055 v_0 := v.Args[0] 35056 if v_0.Op != OpAMD64SHLLconst { 35057 break 35058 } 35059 c := v_0.AuxInt 35060 x := v_0.Args[0] 35061 v_1 := v.Args[1] 35062 if v_1.Op != OpAMD64SHRLconst { 35063 break 35064 } 35065 d := v_1.AuxInt 35066 if x != v_1.Args[0] { 35067 break 35068 } 35069 if !(d == 32-c) { 35070 break 35071 } 35072 v.reset(OpAMD64ROLLconst) 
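// For 0 < c < 32, x<<c and x>>(32-c) occupy disjoint bit positions, so
// combining them with XOR (like OR or ADD) yields a 32-bit rotate; that
// is the identity behind this ROLLconst rule. A minimal standalone
// sketch (assuming package main):
//
//	package main
//
//	import (
//		"fmt"
//		"math/bits"
//	)
//
//	func main() {
//		x, c := uint32(0x80000001), uint(3)
//		fmt.Println(x<<c^x>>(32-c) == bits.RotateLeft32(x, int(c))) // true
//	}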
35073 v.AuxInt = c 35074 v.AddArg(x) 35075 return true 35076 } 35077 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) 35078 // cond: d==32-c 35079 // result: (ROLLconst x [c]) 35080 for { 35081 v_0 := v.Args[0] 35082 if v_0.Op != OpAMD64SHRLconst { 35083 break 35084 } 35085 d := v_0.AuxInt 35086 x := v_0.Args[0] 35087 v_1 := v.Args[1] 35088 if v_1.Op != OpAMD64SHLLconst { 35089 break 35090 } 35091 c := v_1.AuxInt 35092 if x != v_1.Args[0] { 35093 break 35094 } 35095 if !(d == 32-c) { 35096 break 35097 } 35098 v.reset(OpAMD64ROLLconst) 35099 v.AuxInt = c 35100 v.AddArg(x) 35101 return true 35102 } 35103 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 35104 // cond: d==16-c && c < 16 && t.Size() == 2 35105 // result: (ROLWconst x [c]) 35106 for { 35107 t := v.Type 35108 v_0 := v.Args[0] 35109 if v_0.Op != OpAMD64SHLLconst { 35110 break 35111 } 35112 c := v_0.AuxInt 35113 x := v_0.Args[0] 35114 v_1 := v.Args[1] 35115 if v_1.Op != OpAMD64SHRWconst { 35116 break 35117 } 35118 d := v_1.AuxInt 35119 if x != v_1.Args[0] { 35120 break 35121 } 35122 if !(d == 16-c && c < 16 && t.Size() == 2) { 35123 break 35124 } 35125 v.reset(OpAMD64ROLWconst) 35126 v.AuxInt = c 35127 v.AddArg(x) 35128 return true 35129 } 35130 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 35131 // cond: d==16-c && c < 16 && t.Size() == 2 35132 // result: (ROLWconst x [c]) 35133 for { 35134 t := v.Type 35135 v_0 := v.Args[0] 35136 if v_0.Op != OpAMD64SHRWconst { 35137 break 35138 } 35139 d := v_0.AuxInt 35140 x := v_0.Args[0] 35141 v_1 := v.Args[1] 35142 if v_1.Op != OpAMD64SHLLconst { 35143 break 35144 } 35145 c := v_1.AuxInt 35146 if x != v_1.Args[0] { 35147 break 35148 } 35149 if !(d == 16-c && c < 16 && t.Size() == 2) { 35150 break 35151 } 35152 v.reset(OpAMD64ROLWconst) 35153 v.AuxInt = c 35154 v.AddArg(x) 35155 return true 35156 } 35157 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 35158 // cond: d==8-c && c < 8 && t.Size() == 1 35159 // result: (ROLBconst x [c]) 35160 for { 35161 t := v.Type 35162 v_0 := v.Args[0] 35163 if v_0.Op != OpAMD64SHLLconst { 35164 break 35165 } 35166 c := v_0.AuxInt 35167 x := v_0.Args[0] 35168 v_1 := v.Args[1] 35169 if v_1.Op != OpAMD64SHRBconst { 35170 break 35171 } 35172 d := v_1.AuxInt 35173 if x != v_1.Args[0] { 35174 break 35175 } 35176 if !(d == 8-c && c < 8 && t.Size() == 1) { 35177 break 35178 } 35179 v.reset(OpAMD64ROLBconst) 35180 v.AuxInt = c 35181 v.AddArg(x) 35182 return true 35183 } 35184 // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 35185 // cond: d==8-c && c < 8 && t.Size() == 1 35186 // result: (ROLBconst x [c]) 35187 for { 35188 t := v.Type 35189 v_0 := v.Args[0] 35190 if v_0.Op != OpAMD64SHRBconst { 35191 break 35192 } 35193 d := v_0.AuxInt 35194 x := v_0.Args[0] 35195 v_1 := v.Args[1] 35196 if v_1.Op != OpAMD64SHLLconst { 35197 break 35198 } 35199 c := v_1.AuxInt 35200 if x != v_1.Args[0] { 35201 break 35202 } 35203 if !(d == 8-c && c < 8 && t.Size() == 1) { 35204 break 35205 } 35206 v.reset(OpAMD64ROLBconst) 35207 v.AuxInt = c 35208 v.AddArg(x) 35209 return true 35210 } 35211 // match: (XORL x x) 35212 // cond: 35213 // result: (MOVLconst [0]) 35214 for { 35215 x := v.Args[0] 35216 if x != v.Args[1] { 35217 break 35218 } 35219 v.reset(OpAMD64MOVLconst) 35220 v.AuxInt = 0 35221 return true 35222 } 35223 // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) 35224 // cond: canMergeLoad(v, l, x) && clobber(l) 35225 // result: (XORLmem x [off] {sym} ptr mem) 35226 for { 35227 x := v.Args[0] 35228 l := v.Args[1] 35229 if l.Op != 
OpAMD64MOVLload { 35230 break 35231 } 35232 off := l.AuxInt 35233 sym := l.Aux 35234 ptr := l.Args[0] 35235 mem := l.Args[1] 35236 if !(canMergeLoad(v, l, x) && clobber(l)) { 35237 break 35238 } 35239 v.reset(OpAMD64XORLmem) 35240 v.AuxInt = off 35241 v.Aux = sym 35242 v.AddArg(x) 35243 v.AddArg(ptr) 35244 v.AddArg(mem) 35245 return true 35246 } 35247 return false 35248 } 35249 func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { 35250 // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) 35251 // cond: canMergeLoad(v, l, x) && clobber(l) 35252 // result: (XORLmem x [off] {sym} ptr mem) 35253 for { 35254 l := v.Args[0] 35255 if l.Op != OpAMD64MOVLload { 35256 break 35257 } 35258 off := l.AuxInt 35259 sym := l.Aux 35260 ptr := l.Args[0] 35261 mem := l.Args[1] 35262 x := v.Args[1] 35263 if !(canMergeLoad(v, l, x) && clobber(l)) { 35264 break 35265 } 35266 v.reset(OpAMD64XORLmem) 35267 v.AuxInt = off 35268 v.Aux = sym 35269 v.AddArg(x) 35270 v.AddArg(ptr) 35271 v.AddArg(mem) 35272 return true 35273 } 35274 return false 35275 } 35276 func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { 35277 // match: (XORLconst [c] (XORLconst [d] x)) 35278 // cond: 35279 // result: (XORLconst [c ^ d] x) 35280 for { 35281 c := v.AuxInt 35282 v_0 := v.Args[0] 35283 if v_0.Op != OpAMD64XORLconst { 35284 break 35285 } 35286 d := v_0.AuxInt 35287 x := v_0.Args[0] 35288 v.reset(OpAMD64XORLconst) 35289 v.AuxInt = c ^ d 35290 v.AddArg(x) 35291 return true 35292 } 35293 // match: (XORLconst [c] x) 35294 // cond: int32(c)==0 35295 // result: x 35296 for { 35297 c := v.AuxInt 35298 x := v.Args[0] 35299 if !(int32(c) == 0) { 35300 break 35301 } 35302 v.reset(OpCopy) 35303 v.Type = x.Type 35304 v.AddArg(x) 35305 return true 35306 } 35307 // match: (XORLconst [c] (MOVLconst [d])) 35308 // cond: 35309 // result: (MOVLconst [c^d]) 35310 for { 35311 c := v.AuxInt 35312 v_0 := v.Args[0] 35313 if v_0.Op != OpAMD64MOVLconst { 35314 break 35315 } 35316 d := v_0.AuxInt 35317 v.reset(OpAMD64MOVLconst) 35318 v.AuxInt = c ^ d 35319 return true 35320 } 35321 return false 35322 } 35323 func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { 35324 // match: (XORQ x (MOVQconst [c])) 35325 // cond: is32Bit(c) 35326 // result: (XORQconst [c] x) 35327 for { 35328 x := v.Args[0] 35329 v_1 := v.Args[1] 35330 if v_1.Op != OpAMD64MOVQconst { 35331 break 35332 } 35333 c := v_1.AuxInt 35334 if !(is32Bit(c)) { 35335 break 35336 } 35337 v.reset(OpAMD64XORQconst) 35338 v.AuxInt = c 35339 v.AddArg(x) 35340 return true 35341 } 35342 // match: (XORQ (MOVQconst [c]) x) 35343 // cond: is32Bit(c) 35344 // result: (XORQconst [c] x) 35345 for { 35346 v_0 := v.Args[0] 35347 if v_0.Op != OpAMD64MOVQconst { 35348 break 35349 } 35350 c := v_0.AuxInt 35351 x := v.Args[1] 35352 if !(is32Bit(c)) { 35353 break 35354 } 35355 v.reset(OpAMD64XORQconst) 35356 v.AuxInt = c 35357 v.AddArg(x) 35358 return true 35359 } 35360 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) 35361 // cond: d==64-c 35362 // result: (ROLQconst x [c]) 35363 for { 35364 v_0 := v.Args[0] 35365 if v_0.Op != OpAMD64SHLQconst { 35366 break 35367 } 35368 c := v_0.AuxInt 35369 x := v_0.Args[0] 35370 v_1 := v.Args[1] 35371 if v_1.Op != OpAMD64SHRQconst { 35372 break 35373 } 35374 d := v_1.AuxInt 35375 if x != v_1.Args[0] { 35376 break 35377 } 35378 if !(d == 64-c) { 35379 break 35380 } 35381 v.reset(OpAMD64ROLQconst) 35382 v.AuxInt = c 35383 v.AddArg(x) 35384 return true 35385 } 35386 // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) 35387 // cond: d==64-c 35388 // result: (ROLQconst x [c]) 
35389 for { 35390 v_0 := v.Args[0] 35391 if v_0.Op != OpAMD64SHRQconst { 35392 break 35393 } 35394 d := v_0.AuxInt 35395 x := v_0.Args[0] 35396 v_1 := v.Args[1] 35397 if v_1.Op != OpAMD64SHLQconst { 35398 break 35399 } 35400 c := v_1.AuxInt 35401 if x != v_1.Args[0] { 35402 break 35403 } 35404 if !(d == 64-c) { 35405 break 35406 } 35407 v.reset(OpAMD64ROLQconst) 35408 v.AuxInt = c 35409 v.AddArg(x) 35410 return true 35411 } 35412 // match: (XORQ x x) 35413 // cond: 35414 // result: (MOVQconst [0]) 35415 for { 35416 x := v.Args[0] 35417 if x != v.Args[1] { 35418 break 35419 } 35420 v.reset(OpAMD64MOVQconst) 35421 v.AuxInt = 0 35422 return true 35423 } 35424 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) 35425 // cond: canMergeLoad(v, l, x) && clobber(l) 35426 // result: (XORQmem x [off] {sym} ptr mem) 35427 for { 35428 x := v.Args[0] 35429 l := v.Args[1] 35430 if l.Op != OpAMD64MOVQload { 35431 break 35432 } 35433 off := l.AuxInt 35434 sym := l.Aux 35435 ptr := l.Args[0] 35436 mem := l.Args[1] 35437 if !(canMergeLoad(v, l, x) && clobber(l)) { 35438 break 35439 } 35440 v.reset(OpAMD64XORQmem) 35441 v.AuxInt = off 35442 v.Aux = sym 35443 v.AddArg(x) 35444 v.AddArg(ptr) 35445 v.AddArg(mem) 35446 return true 35447 } 35448 // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) 35449 // cond: canMergeLoad(v, l, x) && clobber(l) 35450 // result: (XORQmem x [off] {sym} ptr mem) 35451 for { 35452 l := v.Args[0] 35453 if l.Op != OpAMD64MOVQload { 35454 break 35455 } 35456 off := l.AuxInt 35457 sym := l.Aux 35458 ptr := l.Args[0] 35459 mem := l.Args[1] 35460 x := v.Args[1] 35461 if !(canMergeLoad(v, l, x) && clobber(l)) { 35462 break 35463 } 35464 v.reset(OpAMD64XORQmem) 35465 v.AuxInt = off 35466 v.Aux = sym 35467 v.AddArg(x) 35468 v.AddArg(ptr) 35469 v.AddArg(mem) 35470 return true 35471 } 35472 return false 35473 } 35474 func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { 35475 // match: (XORQconst [c] (XORQconst [d] x)) 35476 // cond: 35477 // result: (XORQconst [c ^ d] x) 35478 for { 35479 c := v.AuxInt 35480 v_0 := v.Args[0] 35481 if v_0.Op != OpAMD64XORQconst { 35482 break 35483 } 35484 d := v_0.AuxInt 35485 x := v_0.Args[0] 35486 v.reset(OpAMD64XORQconst) 35487 v.AuxInt = c ^ d 35488 v.AddArg(x) 35489 return true 35490 } 35491 // match: (XORQconst [0] x) 35492 // cond: 35493 // result: x 35494 for { 35495 if v.AuxInt != 0 { 35496 break 35497 } 35498 x := v.Args[0] 35499 v.reset(OpCopy) 35500 v.Type = x.Type 35501 v.AddArg(x) 35502 return true 35503 } 35504 // match: (XORQconst [c] (MOVQconst [d])) 35505 // cond: 35506 // result: (MOVQconst [c^d]) 35507 for { 35508 c := v.AuxInt 35509 v_0 := v.Args[0] 35510 if v_0.Op != OpAMD64MOVQconst { 35511 break 35512 } 35513 d := v_0.AuxInt 35514 v.reset(OpAMD64MOVQconst) 35515 v.AuxInt = c ^ d 35516 return true 35517 } 35518 return false 35519 } 35520 func rewriteValueAMD64_OpAdd16_0(v *Value) bool { 35521 // match: (Add16 x y) 35522 // cond: 35523 // result: (ADDL x y) 35524 for { 35525 x := v.Args[0] 35526 y := v.Args[1] 35527 v.reset(OpAMD64ADDL) 35528 v.AddArg(x) 35529 v.AddArg(y) 35530 return true 35531 } 35532 } 35533 func rewriteValueAMD64_OpAdd32_0(v *Value) bool { 35534 // match: (Add32 x y) 35535 // cond: 35536 // result: (ADDL x y) 35537 for { 35538 x := v.Args[0] 35539 y := v.Args[1] 35540 v.reset(OpAMD64ADDL) 35541 v.AddArg(x) 35542 v.AddArg(y) 35543 return true 35544 } 35545 } 35546 func rewriteValueAMD64_OpAdd32F_0(v *Value) bool { 35547 // match: (Add32F x y) 35548 // cond: 35549 // result: (ADDSS x y) 35550 for { 35551 x := 
v.Args[0] 35552 y := v.Args[1] 35553 v.reset(OpAMD64ADDSS) 35554 v.AddArg(x) 35555 v.AddArg(y) 35556 return true 35557 } 35558 } 35559 func rewriteValueAMD64_OpAdd64_0(v *Value) bool { 35560 // match: (Add64 x y) 35561 // cond: 35562 // result: (ADDQ x y) 35563 for { 35564 x := v.Args[0] 35565 y := v.Args[1] 35566 v.reset(OpAMD64ADDQ) 35567 v.AddArg(x) 35568 v.AddArg(y) 35569 return true 35570 } 35571 } 35572 func rewriteValueAMD64_OpAdd64F_0(v *Value) bool { 35573 // match: (Add64F x y) 35574 // cond: 35575 // result: (ADDSD x y) 35576 for { 35577 x := v.Args[0] 35578 y := v.Args[1] 35579 v.reset(OpAMD64ADDSD) 35580 v.AddArg(x) 35581 v.AddArg(y) 35582 return true 35583 } 35584 } 35585 func rewriteValueAMD64_OpAdd8_0(v *Value) bool { 35586 // match: (Add8 x y) 35587 // cond: 35588 // result: (ADDL x y) 35589 for { 35590 x := v.Args[0] 35591 y := v.Args[1] 35592 v.reset(OpAMD64ADDL) 35593 v.AddArg(x) 35594 v.AddArg(y) 35595 return true 35596 } 35597 } 35598 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool { 35599 b := v.Block 35600 _ = b 35601 config := b.Func.Config 35602 _ = config 35603 // match: (AddPtr x y) 35604 // cond: config.PtrSize == 8 35605 // result: (ADDQ x y) 35606 for { 35607 x := v.Args[0] 35608 y := v.Args[1] 35609 if !(config.PtrSize == 8) { 35610 break 35611 } 35612 v.reset(OpAMD64ADDQ) 35613 v.AddArg(x) 35614 v.AddArg(y) 35615 return true 35616 } 35617 // match: (AddPtr x y) 35618 // cond: config.PtrSize == 4 35619 // result: (ADDL x y) 35620 for { 35621 x := v.Args[0] 35622 y := v.Args[1] 35623 if !(config.PtrSize == 4) { 35624 break 35625 } 35626 v.reset(OpAMD64ADDL) 35627 v.AddArg(x) 35628 v.AddArg(y) 35629 return true 35630 } 35631 return false 35632 } 35633 func rewriteValueAMD64_OpAddr_0(v *Value) bool { 35634 b := v.Block 35635 _ = b 35636 config := b.Func.Config 35637 _ = config 35638 // match: (Addr {sym} base) 35639 // cond: config.PtrSize == 8 35640 // result: (LEAQ {sym} base) 35641 for { 35642 sym := v.Aux 35643 base := v.Args[0] 35644 if !(config.PtrSize == 8) { 35645 break 35646 } 35647 v.reset(OpAMD64LEAQ) 35648 v.Aux = sym 35649 v.AddArg(base) 35650 return true 35651 } 35652 // match: (Addr {sym} base) 35653 // cond: config.PtrSize == 4 35654 // result: (LEAL {sym} base) 35655 for { 35656 sym := v.Aux 35657 base := v.Args[0] 35658 if !(config.PtrSize == 4) { 35659 break 35660 } 35661 v.reset(OpAMD64LEAL) 35662 v.Aux = sym 35663 v.AddArg(base) 35664 return true 35665 } 35666 return false 35667 } 35668 func rewriteValueAMD64_OpAnd16_0(v *Value) bool { 35669 // match: (And16 x y) 35670 // cond: 35671 // result: (ANDL x y) 35672 for { 35673 x := v.Args[0] 35674 y := v.Args[1] 35675 v.reset(OpAMD64ANDL) 35676 v.AddArg(x) 35677 v.AddArg(y) 35678 return true 35679 } 35680 } 35681 func rewriteValueAMD64_OpAnd32_0(v *Value) bool { 35682 // match: (And32 x y) 35683 // cond: 35684 // result: (ANDL x y) 35685 for { 35686 x := v.Args[0] 35687 y := v.Args[1] 35688 v.reset(OpAMD64ANDL) 35689 v.AddArg(x) 35690 v.AddArg(y) 35691 return true 35692 } 35693 } 35694 func rewriteValueAMD64_OpAnd64_0(v *Value) bool { 35695 // match: (And64 x y) 35696 // cond: 35697 // result: (ANDQ x y) 35698 for { 35699 x := v.Args[0] 35700 y := v.Args[1] 35701 v.reset(OpAMD64ANDQ) 35702 v.AddArg(x) 35703 v.AddArg(y) 35704 return true 35705 } 35706 } 35707 func rewriteValueAMD64_OpAnd8_0(v *Value) bool { 35708 // match: (And8 x y) 35709 // cond: 35710 // result: (ANDL x y) 35711 for { 35712 x := v.Args[0] 35713 y := v.Args[1] 35714 v.reset(OpAMD64ANDL) 35715 v.AddArg(x) 35716 v.AddArg(y) 
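// And8/And16, like the Add rules above, lower to the 32-bit ANDL: the
// high register bits become junk, but every later use of an 8- or
// 16-bit value masks or extends it first, so only the low bits are
// observed. A minimal standalone sketch of the invariant:
//
//	func and8(a, b uint8) uint8 {
//		wide := uint32(a) & uint32(b) // what ANDL computes
//		return uint8(wide)            // truncating recovers the 8-bit result
//	}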
35717 return true 35718 } 35719 } 35720 func rewriteValueAMD64_OpAndB_0(v *Value) bool { 35721 // match: (AndB x y) 35722 // cond: 35723 // result: (ANDL x y) 35724 for { 35725 x := v.Args[0] 35726 y := v.Args[1] 35727 v.reset(OpAMD64ANDL) 35728 v.AddArg(x) 35729 v.AddArg(y) 35730 return true 35731 } 35732 } 35733 func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool { 35734 b := v.Block 35735 _ = b 35736 types := &b.Func.Config.Types 35737 _ = types 35738 // match: (AtomicAdd32 ptr val mem) 35739 // cond: 35740 // result: (AddTupleFirst32 (XADDLlock val ptr mem) val) 35741 for { 35742 ptr := v.Args[0] 35743 val := v.Args[1] 35744 mem := v.Args[2] 35745 v.reset(OpAMD64AddTupleFirst32) 35746 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(types.UInt32, TypeMem)) 35747 v0.AddArg(val) 35748 v0.AddArg(ptr) 35749 v0.AddArg(mem) 35750 v.AddArg(v0) 35751 v.AddArg(val) 35752 return true 35753 } 35754 } 35755 func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool { 35756 b := v.Block 35757 _ = b 35758 types := &b.Func.Config.Types 35759 _ = types 35760 // match: (AtomicAdd64 ptr val mem) 35761 // cond: 35762 // result: (AddTupleFirst64 (XADDQlock val ptr mem) val) 35763 for { 35764 ptr := v.Args[0] 35765 val := v.Args[1] 35766 mem := v.Args[2] 35767 v.reset(OpAMD64AddTupleFirst64) 35768 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(types.UInt64, TypeMem)) 35769 v0.AddArg(val) 35770 v0.AddArg(ptr) 35771 v0.AddArg(mem) 35772 v.AddArg(v0) 35773 v.AddArg(val) 35774 return true 35775 } 35776 } 35777 func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool { 35778 // match: (AtomicAnd8 ptr val mem) 35779 // cond: 35780 // result: (ANDBlock ptr val mem) 35781 for { 35782 ptr := v.Args[0] 35783 val := v.Args[1] 35784 mem := v.Args[2] 35785 v.reset(OpAMD64ANDBlock) 35786 v.AddArg(ptr) 35787 v.AddArg(val) 35788 v.AddArg(mem) 35789 return true 35790 } 35791 } 35792 func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool { 35793 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 35794 // cond: 35795 // result: (CMPXCHGLlock ptr old new_ mem) 35796 for { 35797 ptr := v.Args[0] 35798 old := v.Args[1] 35799 new_ := v.Args[2] 35800 mem := v.Args[3] 35801 v.reset(OpAMD64CMPXCHGLlock) 35802 v.AddArg(ptr) 35803 v.AddArg(old) 35804 v.AddArg(new_) 35805 v.AddArg(mem) 35806 return true 35807 } 35808 } 35809 func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool { 35810 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 35811 // cond: 35812 // result: (CMPXCHGQlock ptr old new_ mem) 35813 for { 35814 ptr := v.Args[0] 35815 old := v.Args[1] 35816 new_ := v.Args[2] 35817 mem := v.Args[3] 35818 v.reset(OpAMD64CMPXCHGQlock) 35819 v.AddArg(ptr) 35820 v.AddArg(old) 35821 v.AddArg(new_) 35822 v.AddArg(mem) 35823 return true 35824 } 35825 } 35826 func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool { 35827 // match: (AtomicExchange32 ptr val mem) 35828 // cond: 35829 // result: (XCHGL val ptr mem) 35830 for { 35831 ptr := v.Args[0] 35832 val := v.Args[1] 35833 mem := v.Args[2] 35834 v.reset(OpAMD64XCHGL) 35835 v.AddArg(val) 35836 v.AddArg(ptr) 35837 v.AddArg(mem) 35838 return true 35839 } 35840 } 35841 func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool { 35842 // match: (AtomicExchange64 ptr val mem) 35843 // cond: 35844 // result: (XCHGQ val ptr mem) 35845 for { 35846 ptr := v.Args[0] 35847 val := v.Args[1] 35848 mem := v.Args[2] 35849 v.reset(OpAMD64XCHGQ) 35850 v.AddArg(val) 35851 v.AddArg(ptr) 35852 v.AddArg(mem) 35853 return true 35854 } 35855 } 35856 func 
rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool { 35857 // match: (AtomicLoad32 ptr mem) 35858 // cond: 35859 // result: (MOVLatomicload ptr mem) 35860 for { 35861 ptr := v.Args[0] 35862 mem := v.Args[1] 35863 v.reset(OpAMD64MOVLatomicload) 35864 v.AddArg(ptr) 35865 v.AddArg(mem) 35866 return true 35867 } 35868 } 35869 func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool { 35870 // match: (AtomicLoad64 ptr mem) 35871 // cond: 35872 // result: (MOVQatomicload ptr mem) 35873 for { 35874 ptr := v.Args[0] 35875 mem := v.Args[1] 35876 v.reset(OpAMD64MOVQatomicload) 35877 v.AddArg(ptr) 35878 v.AddArg(mem) 35879 return true 35880 } 35881 } 35882 func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool { 35883 b := v.Block 35884 _ = b 35885 config := b.Func.Config 35886 _ = config 35887 // match: (AtomicLoadPtr ptr mem) 35888 // cond: config.PtrSize == 8 35889 // result: (MOVQatomicload ptr mem) 35890 for { 35891 ptr := v.Args[0] 35892 mem := v.Args[1] 35893 if !(config.PtrSize == 8) { 35894 break 35895 } 35896 v.reset(OpAMD64MOVQatomicload) 35897 v.AddArg(ptr) 35898 v.AddArg(mem) 35899 return true 35900 } 35901 // match: (AtomicLoadPtr ptr mem) 35902 // cond: config.PtrSize == 4 35903 // result: (MOVLatomicload ptr mem) 35904 for { 35905 ptr := v.Args[0] 35906 mem := v.Args[1] 35907 if !(config.PtrSize == 4) { 35908 break 35909 } 35910 v.reset(OpAMD64MOVLatomicload) 35911 v.AddArg(ptr) 35912 v.AddArg(mem) 35913 return true 35914 } 35915 return false 35916 } 35917 func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool { 35918 // match: (AtomicOr8 ptr val mem) 35919 // cond: 35920 // result: (ORBlock ptr val mem) 35921 for { 35922 ptr := v.Args[0] 35923 val := v.Args[1] 35924 mem := v.Args[2] 35925 v.reset(OpAMD64ORBlock) 35926 v.AddArg(ptr) 35927 v.AddArg(val) 35928 v.AddArg(mem) 35929 return true 35930 } 35931 } 35932 func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool { 35933 b := v.Block 35934 _ = b 35935 types := &b.Func.Config.Types 35936 _ = types 35937 // match: (AtomicStore32 ptr val mem) 35938 // cond: 35939 // result: (Select1 (XCHGL <MakeTuple(types.UInt32,TypeMem)> val ptr mem)) 35940 for { 35941 ptr := v.Args[0] 35942 val := v.Args[1] 35943 mem := v.Args[2] 35944 v.reset(OpSelect1) 35945 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(types.UInt32, TypeMem)) 35946 v0.AddArg(val) 35947 v0.AddArg(ptr) 35948 v0.AddArg(mem) 35949 v.AddArg(v0) 35950 return true 35951 } 35952 } 35953 func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool { 35954 b := v.Block 35955 _ = b 35956 types := &b.Func.Config.Types 35957 _ = types 35958 // match: (AtomicStore64 ptr val mem) 35959 // cond: 35960 // result: (Select1 (XCHGQ <MakeTuple(types.UInt64,TypeMem)> val ptr mem)) 35961 for { 35962 ptr := v.Args[0] 35963 val := v.Args[1] 35964 mem := v.Args[2] 35965 v.reset(OpSelect1) 35966 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(types.UInt64, TypeMem)) 35967 v0.AddArg(val) 35968 v0.AddArg(ptr) 35969 v0.AddArg(mem) 35970 v.AddArg(v0) 35971 return true 35972 } 35973 } 35974 func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool { 35975 b := v.Block 35976 _ = b 35977 config := b.Func.Config 35978 _ = config 35979 types := &b.Func.Config.Types 35980 _ = types 35981 // match: (AtomicStorePtrNoWB ptr val mem) 35982 // cond: config.PtrSize == 8 35983 // result: (Select1 (XCHGQ <MakeTuple(types.BytePtr,TypeMem)> val ptr mem)) 35984 for { 35985 ptr := v.Args[0] 35986 val := v.Args[1] 35987 mem := v.Args[2] 35988 if !(config.PtrSize == 8) { 35989 break 35990 } 35991 v.reset(OpSelect1) 35992 v0 := 
b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(types.BytePtr, TypeMem)) 35993 v0.AddArg(val) 35994 v0.AddArg(ptr) 35995 v0.AddArg(mem) 35996 v.AddArg(v0) 35997 return true 35998 } 35999 // match: (AtomicStorePtrNoWB ptr val mem) 36000 // cond: config.PtrSize == 4 36001 // result: (Select1 (XCHGL <MakeTuple(types.BytePtr,TypeMem)> val ptr mem)) 36002 for { 36003 ptr := v.Args[0] 36004 val := v.Args[1] 36005 mem := v.Args[2] 36006 if !(config.PtrSize == 4) { 36007 break 36008 } 36009 v.reset(OpSelect1) 36010 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(types.BytePtr, TypeMem)) 36011 v0.AddArg(val) 36012 v0.AddArg(ptr) 36013 v0.AddArg(mem) 36014 v.AddArg(v0) 36015 return true 36016 } 36017 return false 36018 } 36019 func rewriteValueAMD64_OpAvg64u_0(v *Value) bool { 36020 // match: (Avg64u x y) 36021 // cond: 36022 // result: (AVGQU x y) 36023 for { 36024 x := v.Args[0] 36025 y := v.Args[1] 36026 v.reset(OpAMD64AVGQU) 36027 v.AddArg(x) 36028 v.AddArg(y) 36029 return true 36030 } 36031 } 36032 func rewriteValueAMD64_OpBitLen32_0(v *Value) bool { 36033 b := v.Block 36034 _ = b 36035 types := &b.Func.Config.Types 36036 _ = types 36037 // match: (BitLen32 x) 36038 // cond: 36039 // result: (BitLen64 (MOVLQZX <types.UInt64> x)) 36040 for { 36041 x := v.Args[0] 36042 v.reset(OpBitLen64) 36043 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, types.UInt64) 36044 v0.AddArg(x) 36045 v.AddArg(v0) 36046 return true 36047 } 36048 } 36049 func rewriteValueAMD64_OpBitLen64_0(v *Value) bool { 36050 b := v.Block 36051 _ = b 36052 types := &b.Func.Config.Types 36053 _ = types 36054 // match: (BitLen64 <t> x) 36055 // cond: 36056 // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x)))) 36057 for { 36058 t := v.Type 36059 x := v.Args[0] 36060 v.reset(OpAMD64ADDQconst) 36061 v.AuxInt = 1 36062 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) 36063 v1 := b.NewValue0(v.Pos, OpSelect0, t) 36064 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(types.UInt64, TypeFlags)) 36065 v2.AddArg(x) 36066 v1.AddArg(v2) 36067 v0.AddArg(v1) 36068 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 36069 v3.AuxInt = -1 36070 v0.AddArg(v3) 36071 v4 := b.NewValue0(v.Pos, OpSelect1, TypeFlags) 36072 v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(types.UInt64, TypeFlags)) 36073 v5.AddArg(x) 36074 v4.AddArg(v5) 36075 v0.AddArg(v4) 36076 v.AddArg(v0) 36077 return true 36078 } 36079 } 36080 func rewriteValueAMD64_OpBswap32_0(v *Value) bool { 36081 // match: (Bswap32 x) 36082 // cond: 36083 // result: (BSWAPL x) 36084 for { 36085 x := v.Args[0] 36086 v.reset(OpAMD64BSWAPL) 36087 v.AddArg(x) 36088 return true 36089 } 36090 } 36091 func rewriteValueAMD64_OpBswap64_0(v *Value) bool { 36092 // match: (Bswap64 x) 36093 // cond: 36094 // result: (BSWAPQ x) 36095 for { 36096 x := v.Args[0] 36097 v.reset(OpAMD64BSWAPQ) 36098 v.AddArg(x) 36099 return true 36100 } 36101 } 36102 func rewriteValueAMD64_OpClosureCall_0(v *Value) bool { 36103 // match: (ClosureCall [argwid] entry closure mem) 36104 // cond: 36105 // result: (CALLclosure [argwid] entry closure mem) 36106 for { 36107 argwid := v.AuxInt 36108 entry := v.Args[0] 36109 closure := v.Args[1] 36110 mem := v.Args[2] 36111 v.reset(OpAMD64CALLclosure) 36112 v.AuxInt = argwid 36113 v.AddArg(entry) 36114 v.AddArg(closure) 36115 v.AddArg(mem) 36116 return true 36117 } 36118 } 36119 func rewriteValueAMD64_OpCom16_0(v *Value) bool { 36120 // match: (Com16 x) 36121 // cond: 36122 // result: (NOTL x) 36123 for { 36124 x := v.Args[0] 36125 v.reset(OpAMD64NOTL) 36126 
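// On the BitLen64 lowering above: BSRQ yields the index of the highest
// set bit but leaves its output undefined (with ZF set) for input 0;
// CMOVQEQ substitutes -1 in that case, and the trailing ADDQconst [1]
// makes the result agree with math/bits.Len64. A minimal standalone
// sketch of the expected values (assuming package main):
//
//	package main
//
//	import (
//		"fmt"
//		"math/bits"
//	)
//
//	func main() {
//		for _, x := range []uint64{0, 1, 255, 1 << 63} {
//			fmt.Println(bits.Len64(x)) // 0, 1, 8, 64
//		}
//	}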
v.AddArg(x) 36127 return true 36128 } 36129 } 36130 func rewriteValueAMD64_OpCom32_0(v *Value) bool { 36131 // match: (Com32 x) 36132 // cond: 36133 // result: (NOTL x) 36134 for { 36135 x := v.Args[0] 36136 v.reset(OpAMD64NOTL) 36137 v.AddArg(x) 36138 return true 36139 } 36140 } 36141 func rewriteValueAMD64_OpCom64_0(v *Value) bool { 36142 // match: (Com64 x) 36143 // cond: 36144 // result: (NOTQ x) 36145 for { 36146 x := v.Args[0] 36147 v.reset(OpAMD64NOTQ) 36148 v.AddArg(x) 36149 return true 36150 } 36151 } 36152 func rewriteValueAMD64_OpCom8_0(v *Value) bool { 36153 // match: (Com8 x) 36154 // cond: 36155 // result: (NOTL x) 36156 for { 36157 x := v.Args[0] 36158 v.reset(OpAMD64NOTL) 36159 v.AddArg(x) 36160 return true 36161 } 36162 } 36163 func rewriteValueAMD64_OpConst16_0(v *Value) bool { 36164 // match: (Const16 [val]) 36165 // cond: 36166 // result: (MOVLconst [val]) 36167 for { 36168 val := v.AuxInt 36169 v.reset(OpAMD64MOVLconst) 36170 v.AuxInt = val 36171 return true 36172 } 36173 } 36174 func rewriteValueAMD64_OpConst32_0(v *Value) bool { 36175 // match: (Const32 [val]) 36176 // cond: 36177 // result: (MOVLconst [val]) 36178 for { 36179 val := v.AuxInt 36180 v.reset(OpAMD64MOVLconst) 36181 v.AuxInt = val 36182 return true 36183 } 36184 } 36185 func rewriteValueAMD64_OpConst32F_0(v *Value) bool { 36186 // match: (Const32F [val]) 36187 // cond: 36188 // result: (MOVSSconst [val]) 36189 for { 36190 val := v.AuxInt 36191 v.reset(OpAMD64MOVSSconst) 36192 v.AuxInt = val 36193 return true 36194 } 36195 } 36196 func rewriteValueAMD64_OpConst64_0(v *Value) bool { 36197 // match: (Const64 [val]) 36198 // cond: 36199 // result: (MOVQconst [val]) 36200 for { 36201 val := v.AuxInt 36202 v.reset(OpAMD64MOVQconst) 36203 v.AuxInt = val 36204 return true 36205 } 36206 } 36207 func rewriteValueAMD64_OpConst64F_0(v *Value) bool { 36208 // match: (Const64F [val]) 36209 // cond: 36210 // result: (MOVSDconst [val]) 36211 for { 36212 val := v.AuxInt 36213 v.reset(OpAMD64MOVSDconst) 36214 v.AuxInt = val 36215 return true 36216 } 36217 } 36218 func rewriteValueAMD64_OpConst8_0(v *Value) bool { 36219 // match: (Const8 [val]) 36220 // cond: 36221 // result: (MOVLconst [val]) 36222 for { 36223 val := v.AuxInt 36224 v.reset(OpAMD64MOVLconst) 36225 v.AuxInt = val 36226 return true 36227 } 36228 } 36229 func rewriteValueAMD64_OpConstBool_0(v *Value) bool { 36230 // match: (ConstBool [b]) 36231 // cond: 36232 // result: (MOVLconst [b]) 36233 for { 36234 b := v.AuxInt 36235 v.reset(OpAMD64MOVLconst) 36236 v.AuxInt = b 36237 return true 36238 } 36239 } 36240 func rewriteValueAMD64_OpConstNil_0(v *Value) bool { 36241 b := v.Block 36242 _ = b 36243 config := b.Func.Config 36244 _ = config 36245 // match: (ConstNil) 36246 // cond: config.PtrSize == 8 36247 // result: (MOVQconst [0]) 36248 for { 36249 if !(config.PtrSize == 8) { 36250 break 36251 } 36252 v.reset(OpAMD64MOVQconst) 36253 v.AuxInt = 0 36254 return true 36255 } 36256 // match: (ConstNil) 36257 // cond: config.PtrSize == 4 36258 // result: (MOVLconst [0]) 36259 for { 36260 if !(config.PtrSize == 4) { 36261 break 36262 } 36263 v.reset(OpAMD64MOVLconst) 36264 v.AuxInt = 0 36265 return true 36266 } 36267 return false 36268 } 36269 func rewriteValueAMD64_OpConvert_0(v *Value) bool { 36270 b := v.Block 36271 _ = b 36272 config := b.Func.Config 36273 _ = config 36274 // match: (Convert <t> x mem) 36275 // cond: config.PtrSize == 8 36276 // result: (MOVQconvert <t> x mem) 36277 for { 36278 t := v.Type 36279 x := v.Args[0] 36280 mem := v.Args[1] 36281 if 
!(config.PtrSize == 8) { 36282 break 36283 } 36284 v.reset(OpAMD64MOVQconvert) 36285 v.Type = t 36286 v.AddArg(x) 36287 v.AddArg(mem) 36288 return true 36289 } 36290 // match: (Convert <t> x mem) 36291 // cond: config.PtrSize == 4 36292 // result: (MOVLconvert <t> x mem) 36293 for { 36294 t := v.Type 36295 x := v.Args[0] 36296 mem := v.Args[1] 36297 if !(config.PtrSize == 4) { 36298 break 36299 } 36300 v.reset(OpAMD64MOVLconvert) 36301 v.Type = t 36302 v.AddArg(x) 36303 v.AddArg(mem) 36304 return true 36305 } 36306 return false 36307 } 36308 func rewriteValueAMD64_OpCtz32_0(v *Value) bool { 36309 b := v.Block 36310 _ = b 36311 types := &b.Func.Config.Types 36312 _ = types 36313 // match: (Ctz32 x) 36314 // cond: 36315 // result: (Select0 (BSFQ (ORQ <types.UInt64> (MOVQconst [1<<32]) x))) 36316 for { 36317 x := v.Args[0] 36318 v.reset(OpSelect0) 36319 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags)) 36320 v1 := b.NewValue0(v.Pos, OpAMD64ORQ, types.UInt64) 36321 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) 36322 v2.AuxInt = 1 << 32 36323 v1.AddArg(v2) 36324 v1.AddArg(x) 36325 v0.AddArg(v1) 36326 v.AddArg(v0) 36327 return true 36328 } 36329 } 36330 func rewriteValueAMD64_OpCtz64_0(v *Value) bool { 36331 b := v.Block 36332 _ = b 36333 types := &b.Func.Config.Types 36334 _ = types 36335 // match: (Ctz64 <t> x) 36336 // cond: 36337 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x))) 36338 for { 36339 t := v.Type 36340 x := v.Args[0] 36341 v.reset(OpAMD64CMOVQEQ) 36342 v0 := b.NewValue0(v.Pos, OpSelect0, t) 36343 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags)) 36344 v1.AddArg(x) 36345 v0.AddArg(v1) 36346 v.AddArg(v0) 36347 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 36348 v2.AuxInt = 64 36349 v.AddArg(v2) 36350 v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags) 36351 v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags)) 36352 v4.AddArg(x) 36353 v3.AddArg(v4) 36354 v.AddArg(v3) 36355 return true 36356 } 36357 } 36358 func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool { 36359 // match: (Cvt32Fto32 x) 36360 // cond: 36361 // result: (CVTTSS2SL x) 36362 for { 36363 x := v.Args[0] 36364 v.reset(OpAMD64CVTTSS2SL) 36365 v.AddArg(x) 36366 return true 36367 } 36368 } 36369 func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool { 36370 // match: (Cvt32Fto64 x) 36371 // cond: 36372 // result: (CVTTSS2SQ x) 36373 for { 36374 x := v.Args[0] 36375 v.reset(OpAMD64CVTTSS2SQ) 36376 v.AddArg(x) 36377 return true 36378 } 36379 } 36380 func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool { 36381 // match: (Cvt32Fto64F x) 36382 // cond: 36383 // result: (CVTSS2SD x) 36384 for { 36385 x := v.Args[0] 36386 v.reset(OpAMD64CVTSS2SD) 36387 v.AddArg(x) 36388 return true 36389 } 36390 } 36391 func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool { 36392 // match: (Cvt32to32F x) 36393 // cond: 36394 // result: (CVTSL2SS x) 36395 for { 36396 x := v.Args[0] 36397 v.reset(OpAMD64CVTSL2SS) 36398 v.AddArg(x) 36399 return true 36400 } 36401 } 36402 func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool { 36403 // match: (Cvt32to64F x) 36404 // cond: 36405 // result: (CVTSL2SD x) 36406 for { 36407 x := v.Args[0] 36408 v.reset(OpAMD64CVTSL2SD) 36409 v.AddArg(x) 36410 return true 36411 } 36412 } 36413 func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool { 36414 // match: (Cvt64Fto32 x) 36415 // cond: 36416 // result: (CVTTSD2SL x) 36417 for { 36418 x := v.Args[0] 36419 v.reset(OpAMD64CVTTSD2SL) 36420 v.AddArg(x) 
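// On the Ctz32 lowering above: ORing in 1<<32 plants a guard bit just
// past the low 32 bits, so the 64-bit BSFQ is never given 0 and returns
// 32 when x == 0, matching math/bits.TrailingZeros32. A minimal
// standalone sketch (assuming package main):
//
//	package main
//
//	import (
//		"fmt"
//		"math/bits"
//	)
//
//	func main() {
//		fmt.Println(bits.TrailingZeros64(1<<32 | uint64(0)))  // 32
//		fmt.Println(bits.TrailingZeros64(1<<32 | uint64(12))) // 2
//		fmt.Println(bits.TrailingZeros32(0))                  // 32
//	}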
36421 return true 36422 } 36423 } 36424 func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool { 36425 // match: (Cvt64Fto32F x) 36426 // cond: 36427 // result: (CVTSD2SS x) 36428 for { 36429 x := v.Args[0] 36430 v.reset(OpAMD64CVTSD2SS) 36431 v.AddArg(x) 36432 return true 36433 } 36434 } 36435 func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool { 36436 // match: (Cvt64Fto64 x) 36437 // cond: 36438 // result: (CVTTSD2SQ x) 36439 for { 36440 x := v.Args[0] 36441 v.reset(OpAMD64CVTTSD2SQ) 36442 v.AddArg(x) 36443 return true 36444 } 36445 } 36446 func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool { 36447 // match: (Cvt64to32F x) 36448 // cond: 36449 // result: (CVTSQ2SS x) 36450 for { 36451 x := v.Args[0] 36452 v.reset(OpAMD64CVTSQ2SS) 36453 v.AddArg(x) 36454 return true 36455 } 36456 } 36457 func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool { 36458 // match: (Cvt64to64F x) 36459 // cond: 36460 // result: (CVTSQ2SD x) 36461 for { 36462 x := v.Args[0] 36463 v.reset(OpAMD64CVTSQ2SD) 36464 v.AddArg(x) 36465 return true 36466 } 36467 } 36468 func rewriteValueAMD64_OpDiv128u_0(v *Value) bool { 36469 // match: (Div128u xhi xlo y) 36470 // cond: 36471 // result: (DIVQU2 xhi xlo y) 36472 for { 36473 xhi := v.Args[0] 36474 xlo := v.Args[1] 36475 y := v.Args[2] 36476 v.reset(OpAMD64DIVQU2) 36477 v.AddArg(xhi) 36478 v.AddArg(xlo) 36479 v.AddArg(y) 36480 return true 36481 } 36482 } 36483 func rewriteValueAMD64_OpDiv16_0(v *Value) bool { 36484 b := v.Block 36485 _ = b 36486 types := &b.Func.Config.Types 36487 _ = types 36488 // match: (Div16 x y) 36489 // cond: 36490 // result: (Select0 (DIVW x y)) 36491 for { 36492 x := v.Args[0] 36493 y := v.Args[1] 36494 v.reset(OpSelect0) 36495 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16)) 36496 v0.AddArg(x) 36497 v0.AddArg(y) 36498 v.AddArg(v0) 36499 return true 36500 } 36501 } 36502 func rewriteValueAMD64_OpDiv16u_0(v *Value) bool { 36503 b := v.Block 36504 _ = b 36505 types := &b.Func.Config.Types 36506 _ = types 36507 // match: (Div16u x y) 36508 // cond: 36509 // result: (Select0 (DIVWU x y)) 36510 for { 36511 x := v.Args[0] 36512 y := v.Args[1] 36513 v.reset(OpSelect0) 36514 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16)) 36515 v0.AddArg(x) 36516 v0.AddArg(y) 36517 v.AddArg(v0) 36518 return true 36519 } 36520 } 36521 func rewriteValueAMD64_OpDiv32_0(v *Value) bool { 36522 b := v.Block 36523 _ = b 36524 types := &b.Func.Config.Types 36525 _ = types 36526 // match: (Div32 x y) 36527 // cond: 36528 // result: (Select0 (DIVL x y)) 36529 for { 36530 x := v.Args[0] 36531 y := v.Args[1] 36532 v.reset(OpSelect0) 36533 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(types.Int32, types.Int32)) 36534 v0.AddArg(x) 36535 v0.AddArg(y) 36536 v.AddArg(v0) 36537 return true 36538 } 36539 } 36540 func rewriteValueAMD64_OpDiv32F_0(v *Value) bool { 36541 // match: (Div32F x y) 36542 // cond: 36543 // result: (DIVSS x y) 36544 for { 36545 x := v.Args[0] 36546 y := v.Args[1] 36547 v.reset(OpAMD64DIVSS) 36548 v.AddArg(x) 36549 v.AddArg(y) 36550 return true 36551 } 36552 } 36553 func rewriteValueAMD64_OpDiv32u_0(v *Value) bool { 36554 b := v.Block 36555 _ = b 36556 types := &b.Func.Config.Types 36557 _ = types 36558 // match: (Div32u x y) 36559 // cond: 36560 // result: (Select0 (DIVLU x y)) 36561 for { 36562 x := v.Args[0] 36563 y := v.Args[1] 36564 v.reset(OpSelect0) 36565 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(types.UInt32, types.UInt32)) 36566 v0.AddArg(x) 36567 v0.AddArg(y) 36568 v.AddArg(v0) 36569 return 
true 36570 } 36571 } 36572 func rewriteValueAMD64_OpDiv64_0(v *Value) bool { 36573 b := v.Block 36574 _ = b 36575 types := &b.Func.Config.Types 36576 _ = types 36577 // match: (Div64 x y) 36578 // cond: 36579 // result: (Select0 (DIVQ x y)) 36580 for { 36581 x := v.Args[0] 36582 y := v.Args[1] 36583 v.reset(OpSelect0) 36584 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(types.Int64, types.Int64)) 36585 v0.AddArg(x) 36586 v0.AddArg(y) 36587 v.AddArg(v0) 36588 return true 36589 } 36590 } 36591 func rewriteValueAMD64_OpDiv64F_0(v *Value) bool { 36592 // match: (Div64F x y) 36593 // cond: 36594 // result: (DIVSD x y) 36595 for { 36596 x := v.Args[0] 36597 y := v.Args[1] 36598 v.reset(OpAMD64DIVSD) 36599 v.AddArg(x) 36600 v.AddArg(y) 36601 return true 36602 } 36603 } 36604 func rewriteValueAMD64_OpDiv64u_0(v *Value) bool { 36605 b := v.Block 36606 _ = b 36607 types := &b.Func.Config.Types 36608 _ = types 36609 // match: (Div64u x y) 36610 // cond: 36611 // result: (Select0 (DIVQU x y)) 36612 for { 36613 x := v.Args[0] 36614 y := v.Args[1] 36615 v.reset(OpSelect0) 36616 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(types.UInt64, types.UInt64)) 36617 v0.AddArg(x) 36618 v0.AddArg(y) 36619 v.AddArg(v0) 36620 return true 36621 } 36622 } 36623 func rewriteValueAMD64_OpDiv8_0(v *Value) bool { 36624 b := v.Block 36625 _ = b 36626 types := &b.Func.Config.Types 36627 _ = types 36628 // match: (Div8 x y) 36629 // cond: 36630 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 36631 for { 36632 x := v.Args[0] 36633 y := v.Args[1] 36634 v.reset(OpSelect0) 36635 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16)) 36636 v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16) 36637 v1.AddArg(x) 36638 v0.AddArg(v1) 36639 v2 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16) 36640 v2.AddArg(y) 36641 v0.AddArg(v2) 36642 v.AddArg(v0) 36643 return true 36644 } 36645 } 36646 func rewriteValueAMD64_OpDiv8u_0(v *Value) bool { 36647 b := v.Block 36648 _ = b 36649 types := &b.Func.Config.Types 36650 _ = types 36651 // match: (Div8u x y) 36652 // cond: 36653 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 36654 for { 36655 x := v.Args[0] 36656 y := v.Args[1] 36657 v.reset(OpSelect0) 36658 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16)) 36659 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16) 36660 v1.AddArg(x) 36661 v0.AddArg(v1) 36662 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16) 36663 v2.AddArg(y) 36664 v0.AddArg(v2) 36665 v.AddArg(v0) 36666 return true 36667 } 36668 } 36669 func rewriteValueAMD64_OpEq16_0(v *Value) bool { 36670 b := v.Block 36671 _ = b 36672 // match: (Eq16 x y) 36673 // cond: 36674 // result: (SETEQ (CMPW x y)) 36675 for { 36676 x := v.Args[0] 36677 y := v.Args[1] 36678 v.reset(OpAMD64SETEQ) 36679 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 36680 v0.AddArg(x) 36681 v0.AddArg(y) 36682 v.AddArg(v0) 36683 return true 36684 } 36685 } 36686 func rewriteValueAMD64_OpEq32_0(v *Value) bool { 36687 b := v.Block 36688 _ = b 36689 // match: (Eq32 x y) 36690 // cond: 36691 // result: (SETEQ (CMPL x y)) 36692 for { 36693 x := v.Args[0] 36694 y := v.Args[1] 36695 v.reset(OpAMD64SETEQ) 36696 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 36697 v0.AddArg(x) 36698 v0.AddArg(y) 36699 v.AddArg(v0) 36700 return true 36701 } 36702 } 36703 func rewriteValueAMD64_OpEq32F_0(v *Value) bool { 36704 b := v.Block 36705 _ = b 36706 // match: (Eq32F x y) 36707 // cond: 36708 // result: (SETEQF (UCOMISS x y)) 36709 for { 36710 x := 
v.Args[0] 36711 y := v.Args[1] 36712 v.reset(OpAMD64SETEQF) 36713 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 36714 v0.AddArg(x) 36715 v0.AddArg(y) 36716 v.AddArg(v0) 36717 return true 36718 } 36719 } 36720 func rewriteValueAMD64_OpEq64_0(v *Value) bool { 36721 b := v.Block 36722 _ = b 36723 // match: (Eq64 x y) 36724 // cond: 36725 // result: (SETEQ (CMPQ x y)) 36726 for { 36727 x := v.Args[0] 36728 y := v.Args[1] 36729 v.reset(OpAMD64SETEQ) 36730 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 36731 v0.AddArg(x) 36732 v0.AddArg(y) 36733 v.AddArg(v0) 36734 return true 36735 } 36736 } 36737 func rewriteValueAMD64_OpEq64F_0(v *Value) bool { 36738 b := v.Block 36739 _ = b 36740 // match: (Eq64F x y) 36741 // cond: 36742 // result: (SETEQF (UCOMISD x y)) 36743 for { 36744 x := v.Args[0] 36745 y := v.Args[1] 36746 v.reset(OpAMD64SETEQF) 36747 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 36748 v0.AddArg(x) 36749 v0.AddArg(y) 36750 v.AddArg(v0) 36751 return true 36752 } 36753 } 36754 func rewriteValueAMD64_OpEq8_0(v *Value) bool { 36755 b := v.Block 36756 _ = b 36757 // match: (Eq8 x y) 36758 // cond: 36759 // result: (SETEQ (CMPB x y)) 36760 for { 36761 x := v.Args[0] 36762 y := v.Args[1] 36763 v.reset(OpAMD64SETEQ) 36764 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 36765 v0.AddArg(x) 36766 v0.AddArg(y) 36767 v.AddArg(v0) 36768 return true 36769 } 36770 } 36771 func rewriteValueAMD64_OpEqB_0(v *Value) bool { 36772 b := v.Block 36773 _ = b 36774 // match: (EqB x y) 36775 // cond: 36776 // result: (SETEQ (CMPB x y)) 36777 for { 36778 x := v.Args[0] 36779 y := v.Args[1] 36780 v.reset(OpAMD64SETEQ) 36781 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 36782 v0.AddArg(x) 36783 v0.AddArg(y) 36784 v.AddArg(v0) 36785 return true 36786 } 36787 } 36788 func rewriteValueAMD64_OpEqPtr_0(v *Value) bool { 36789 b := v.Block 36790 _ = b 36791 config := b.Func.Config 36792 _ = config 36793 // match: (EqPtr x y) 36794 // cond: config.PtrSize == 8 36795 // result: (SETEQ (CMPQ x y)) 36796 for { 36797 x := v.Args[0] 36798 y := v.Args[1] 36799 if !(config.PtrSize == 8) { 36800 break 36801 } 36802 v.reset(OpAMD64SETEQ) 36803 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 36804 v0.AddArg(x) 36805 v0.AddArg(y) 36806 v.AddArg(v0) 36807 return true 36808 } 36809 // match: (EqPtr x y) 36810 // cond: config.PtrSize == 4 36811 // result: (SETEQ (CMPL x y)) 36812 for { 36813 x := v.Args[0] 36814 y := v.Args[1] 36815 if !(config.PtrSize == 4) { 36816 break 36817 } 36818 v.reset(OpAMD64SETEQ) 36819 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 36820 v0.AddArg(x) 36821 v0.AddArg(y) 36822 v.AddArg(v0) 36823 return true 36824 } 36825 return false 36826 } 36827 func rewriteValueAMD64_OpGeq16_0(v *Value) bool { 36828 b := v.Block 36829 _ = b 36830 // match: (Geq16 x y) 36831 // cond: 36832 // result: (SETGE (CMPW x y)) 36833 for { 36834 x := v.Args[0] 36835 y := v.Args[1] 36836 v.reset(OpAMD64SETGE) 36837 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 36838 v0.AddArg(x) 36839 v0.AddArg(y) 36840 v.AddArg(v0) 36841 return true 36842 } 36843 } 36844 func rewriteValueAMD64_OpGeq16U_0(v *Value) bool { 36845 b := v.Block 36846 _ = b 36847 // match: (Geq16U x y) 36848 // cond: 36849 // result: (SETAE (CMPW x y)) 36850 for { 36851 x := v.Args[0] 36852 y := v.Args[1] 36853 v.reset(OpAMD64SETAE) 36854 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 36855 v0.AddArg(x) 36856 v0.AddArg(y) 36857 v.AddArg(v0) 36858 return true 36859 } 36860 } 36861 func rewriteValueAMD64_OpGeq32_0(v *Value) bool { 36862 b := v.Block 
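// On the Div8/Div8u rules above: there is no byte-sized divide here;
// operands are sign- or zero-extended to 16 bits, DIVW produces a
// (quotient, remainder) tuple, and Select0 keeps the quotient. A
// minimal standalone sketch of the widening identity:
//
//	func div8(a, b int8) int8 {
//		// What (Select0 (DIVW (SignExt8to16 a) (SignExt8to16 b))) computes.
//		return int8(int16(a) / int16(b))
//	}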
36863 _ = b 36864 // match: (Geq32 x y) 36865 // cond: 36866 // result: (SETGE (CMPL x y)) 36867 for { 36868 x := v.Args[0] 36869 y := v.Args[1] 36870 v.reset(OpAMD64SETGE) 36871 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 36872 v0.AddArg(x) 36873 v0.AddArg(y) 36874 v.AddArg(v0) 36875 return true 36876 } 36877 } 36878 func rewriteValueAMD64_OpGeq32F_0(v *Value) bool { 36879 b := v.Block 36880 _ = b 36881 // match: (Geq32F x y) 36882 // cond: 36883 // result: (SETGEF (UCOMISS x y)) 36884 for { 36885 x := v.Args[0] 36886 y := v.Args[1] 36887 v.reset(OpAMD64SETGEF) 36888 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 36889 v0.AddArg(x) 36890 v0.AddArg(y) 36891 v.AddArg(v0) 36892 return true 36893 } 36894 } 36895 func rewriteValueAMD64_OpGeq32U_0(v *Value) bool { 36896 b := v.Block 36897 _ = b 36898 // match: (Geq32U x y) 36899 // cond: 36900 // result: (SETAE (CMPL x y)) 36901 for { 36902 x := v.Args[0] 36903 y := v.Args[1] 36904 v.reset(OpAMD64SETAE) 36905 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 36906 v0.AddArg(x) 36907 v0.AddArg(y) 36908 v.AddArg(v0) 36909 return true 36910 } 36911 } 36912 func rewriteValueAMD64_OpGeq64_0(v *Value) bool { 36913 b := v.Block 36914 _ = b 36915 // match: (Geq64 x y) 36916 // cond: 36917 // result: (SETGE (CMPQ x y)) 36918 for { 36919 x := v.Args[0] 36920 y := v.Args[1] 36921 v.reset(OpAMD64SETGE) 36922 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 36923 v0.AddArg(x) 36924 v0.AddArg(y) 36925 v.AddArg(v0) 36926 return true 36927 } 36928 } 36929 func rewriteValueAMD64_OpGeq64F_0(v *Value) bool { 36930 b := v.Block 36931 _ = b 36932 // match: (Geq64F x y) 36933 // cond: 36934 // result: (SETGEF (UCOMISD x y)) 36935 for { 36936 x := v.Args[0] 36937 y := v.Args[1] 36938 v.reset(OpAMD64SETGEF) 36939 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 36940 v0.AddArg(x) 36941 v0.AddArg(y) 36942 v.AddArg(v0) 36943 return true 36944 } 36945 } 36946 func rewriteValueAMD64_OpGeq64U_0(v *Value) bool { 36947 b := v.Block 36948 _ = b 36949 // match: (Geq64U x y) 36950 // cond: 36951 // result: (SETAE (CMPQ x y)) 36952 for { 36953 x := v.Args[0] 36954 y := v.Args[1] 36955 v.reset(OpAMD64SETAE) 36956 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 36957 v0.AddArg(x) 36958 v0.AddArg(y) 36959 v.AddArg(v0) 36960 return true 36961 } 36962 } 36963 func rewriteValueAMD64_OpGeq8_0(v *Value) bool { 36964 b := v.Block 36965 _ = b 36966 // match: (Geq8 x y) 36967 // cond: 36968 // result: (SETGE (CMPB x y)) 36969 for { 36970 x := v.Args[0] 36971 y := v.Args[1] 36972 v.reset(OpAMD64SETGE) 36973 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 36974 v0.AddArg(x) 36975 v0.AddArg(y) 36976 v.AddArg(v0) 36977 return true 36978 } 36979 } 36980 func rewriteValueAMD64_OpGeq8U_0(v *Value) bool { 36981 b := v.Block 36982 _ = b 36983 // match: (Geq8U x y) 36984 // cond: 36985 // result: (SETAE (CMPB x y)) 36986 for { 36987 x := v.Args[0] 36988 y := v.Args[1] 36989 v.reset(OpAMD64SETAE) 36990 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 36991 v0.AddArg(x) 36992 v0.AddArg(y) 36993 v.AddArg(v0) 36994 return true 36995 } 36996 } 36997 func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool { 36998 // match: (GetClosurePtr) 36999 // cond: 37000 // result: (LoweredGetClosurePtr) 37001 for { 37002 v.reset(OpAMD64LoweredGetClosurePtr) 37003 return true 37004 } 37005 } 37006 func rewriteValueAMD64_OpGetG_0(v *Value) bool { 37007 // match: (GetG mem) 37008 // cond: 37009 // result: (LoweredGetG mem) 37010 for { 37011 mem := v.Args[0] 37012 v.reset(OpAMD64LoweredGetG) 37013 
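// The Geq/Greater rules pick the condition code by signedness: SETGE and
// SETG read the signed sign/overflow flags, SETAE and SETA the unsigned
// carry flag, all from the same CMP. A minimal standalone sketch of why
// the distinction matters (assuming package main):
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		a, b := int8(-1), int8(1)
//		fmt.Println(a >= b)               // false: signed compare (SETGE)
//		fmt.Println(uint8(a) >= uint8(b)) // true: 0xFF >= 1 (SETAE)
//	}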
v.AddArg(mem) 37014 return true 37015 } 37016 } 37017 func rewriteValueAMD64_OpGreater16_0(v *Value) bool { 37018 b := v.Block 37019 _ = b 37020 // match: (Greater16 x y) 37021 // cond: 37022 // result: (SETG (CMPW x y)) 37023 for { 37024 x := v.Args[0] 37025 y := v.Args[1] 37026 v.reset(OpAMD64SETG) 37027 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 37028 v0.AddArg(x) 37029 v0.AddArg(y) 37030 v.AddArg(v0) 37031 return true 37032 } 37033 } 37034 func rewriteValueAMD64_OpGreater16U_0(v *Value) bool { 37035 b := v.Block 37036 _ = b 37037 // match: (Greater16U x y) 37038 // cond: 37039 // result: (SETA (CMPW x y)) 37040 for { 37041 x := v.Args[0] 37042 y := v.Args[1] 37043 v.reset(OpAMD64SETA) 37044 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 37045 v0.AddArg(x) 37046 v0.AddArg(y) 37047 v.AddArg(v0) 37048 return true 37049 } 37050 } 37051 func rewriteValueAMD64_OpGreater32_0(v *Value) bool { 37052 b := v.Block 37053 _ = b 37054 // match: (Greater32 x y) 37055 // cond: 37056 // result: (SETG (CMPL x y)) 37057 for { 37058 x := v.Args[0] 37059 y := v.Args[1] 37060 v.reset(OpAMD64SETG) 37061 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 37062 v0.AddArg(x) 37063 v0.AddArg(y) 37064 v.AddArg(v0) 37065 return true 37066 } 37067 } 37068 func rewriteValueAMD64_OpGreater32F_0(v *Value) bool { 37069 b := v.Block 37070 _ = b 37071 // match: (Greater32F x y) 37072 // cond: 37073 // result: (SETGF (UCOMISS x y)) 37074 for { 37075 x := v.Args[0] 37076 y := v.Args[1] 37077 v.reset(OpAMD64SETGF) 37078 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 37079 v0.AddArg(x) 37080 v0.AddArg(y) 37081 v.AddArg(v0) 37082 return true 37083 } 37084 } 37085 func rewriteValueAMD64_OpGreater32U_0(v *Value) bool { 37086 b := v.Block 37087 _ = b 37088 // match: (Greater32U x y) 37089 // cond: 37090 // result: (SETA (CMPL x y)) 37091 for { 37092 x := v.Args[0] 37093 y := v.Args[1] 37094 v.reset(OpAMD64SETA) 37095 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 37096 v0.AddArg(x) 37097 v0.AddArg(y) 37098 v.AddArg(v0) 37099 return true 37100 } 37101 } 37102 func rewriteValueAMD64_OpGreater64_0(v *Value) bool { 37103 b := v.Block 37104 _ = b 37105 // match: (Greater64 x y) 37106 // cond: 37107 // result: (SETG (CMPQ x y)) 37108 for { 37109 x := v.Args[0] 37110 y := v.Args[1] 37111 v.reset(OpAMD64SETG) 37112 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 37113 v0.AddArg(x) 37114 v0.AddArg(y) 37115 v.AddArg(v0) 37116 return true 37117 } 37118 } 37119 func rewriteValueAMD64_OpGreater64F_0(v *Value) bool { 37120 b := v.Block 37121 _ = b 37122 // match: (Greater64F x y) 37123 // cond: 37124 // result: (SETGF (UCOMISD x y)) 37125 for { 37126 x := v.Args[0] 37127 y := v.Args[1] 37128 v.reset(OpAMD64SETGF) 37129 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 37130 v0.AddArg(x) 37131 v0.AddArg(y) 37132 v.AddArg(v0) 37133 return true 37134 } 37135 } 37136 func rewriteValueAMD64_OpGreater64U_0(v *Value) bool { 37137 b := v.Block 37138 _ = b 37139 // match: (Greater64U x y) 37140 // cond: 37141 // result: (SETA (CMPQ x y)) 37142 for { 37143 x := v.Args[0] 37144 y := v.Args[1] 37145 v.reset(OpAMD64SETA) 37146 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 37147 v0.AddArg(x) 37148 v0.AddArg(y) 37149 v.AddArg(v0) 37150 return true 37151 } 37152 } 37153 func rewriteValueAMD64_OpGreater8_0(v *Value) bool { 37154 b := v.Block 37155 _ = b 37156 // match: (Greater8 x y) 37157 // cond: 37158 // result: (SETG (CMPB x y)) 37159 for { 37160 x := v.Args[0] 37161 y := v.Args[1] 37162 v.reset(OpAMD64SETG) 37163 v0 := 
b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 37164 v0.AddArg(x) 37165 v0.AddArg(y) 37166 v.AddArg(v0) 37167 return true 37168 } 37169 } 37170 func rewriteValueAMD64_OpGreater8U_0(v *Value) bool { 37171 b := v.Block 37172 _ = b 37173 // match: (Greater8U x y) 37174 // cond: 37175 // result: (SETA (CMPB x y)) 37176 for { 37177 x := v.Args[0] 37178 y := v.Args[1] 37179 v.reset(OpAMD64SETA) 37180 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 37181 v0.AddArg(x) 37182 v0.AddArg(y) 37183 v.AddArg(v0) 37184 return true 37185 } 37186 } 37187 func rewriteValueAMD64_OpHmul32_0(v *Value) bool { 37188 // match: (Hmul32 x y) 37189 // cond: 37190 // result: (HMULL x y) 37191 for { 37192 x := v.Args[0] 37193 y := v.Args[1] 37194 v.reset(OpAMD64HMULL) 37195 v.AddArg(x) 37196 v.AddArg(y) 37197 return true 37198 } 37199 } 37200 func rewriteValueAMD64_OpHmul32u_0(v *Value) bool { 37201 // match: (Hmul32u x y) 37202 // cond: 37203 // result: (HMULLU x y) 37204 for { 37205 x := v.Args[0] 37206 y := v.Args[1] 37207 v.reset(OpAMD64HMULLU) 37208 v.AddArg(x) 37209 v.AddArg(y) 37210 return true 37211 } 37212 } 37213 func rewriteValueAMD64_OpHmul64_0(v *Value) bool { 37214 // match: (Hmul64 x y) 37215 // cond: 37216 // result: (HMULQ x y) 37217 for { 37218 x := v.Args[0] 37219 y := v.Args[1] 37220 v.reset(OpAMD64HMULQ) 37221 v.AddArg(x) 37222 v.AddArg(y) 37223 return true 37224 } 37225 } 37226 func rewriteValueAMD64_OpHmul64u_0(v *Value) bool { 37227 // match: (Hmul64u x y) 37228 // cond: 37229 // result: (HMULQU x y) 37230 for { 37231 x := v.Args[0] 37232 y := v.Args[1] 37233 v.reset(OpAMD64HMULQU) 37234 v.AddArg(x) 37235 v.AddArg(y) 37236 return true 37237 } 37238 } 37239 func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool { 37240 // match: (Int64Hi x) 37241 // cond: 37242 // result: (SHRQconst [32] x) 37243 for { 37244 x := v.Args[0] 37245 v.reset(OpAMD64SHRQconst) 37246 v.AuxInt = 32 37247 v.AddArg(x) 37248 return true 37249 } 37250 } 37251 func rewriteValueAMD64_OpInterCall_0(v *Value) bool { 37252 // match: (InterCall [argwid] entry mem) 37253 // cond: 37254 // result: (CALLinter [argwid] entry mem) 37255 for { 37256 argwid := v.AuxInt 37257 entry := v.Args[0] 37258 mem := v.Args[1] 37259 v.reset(OpAMD64CALLinter) 37260 v.AuxInt = argwid 37261 v.AddArg(entry) 37262 v.AddArg(mem) 37263 return true 37264 } 37265 } 37266 func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool { 37267 b := v.Block 37268 _ = b 37269 // match: (IsInBounds idx len) 37270 // cond: 37271 // result: (SETB (CMPQ idx len)) 37272 for { 37273 idx := v.Args[0] 37274 len := v.Args[1] 37275 v.reset(OpAMD64SETB) 37276 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 37277 v0.AddArg(idx) 37278 v0.AddArg(len) 37279 v.AddArg(v0) 37280 return true 37281 } 37282 } 37283 func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool { 37284 b := v.Block 37285 _ = b 37286 config := b.Func.Config 37287 _ = config 37288 // match: (IsNonNil p) 37289 // cond: config.PtrSize == 8 37290 // result: (SETNE (TESTQ p p)) 37291 for { 37292 p := v.Args[0] 37293 if !(config.PtrSize == 8) { 37294 break 37295 } 37296 v.reset(OpAMD64SETNE) 37297 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, TypeFlags) 37298 v0.AddArg(p) 37299 v0.AddArg(p) 37300 v.AddArg(v0) 37301 return true 37302 } 37303 // match: (IsNonNil p) 37304 // cond: config.PtrSize == 4 37305 // result: (SETNE (TESTL p p)) 37306 for { 37307 p := v.Args[0] 37308 if !(config.PtrSize == 4) { 37309 break 37310 } 37311 v.reset(OpAMD64SETNE) 37312 v0 := b.NewValue0(v.Pos, OpAMD64TESTL, TypeFlags) 37313 v0.AddArg(p) 37314 v0.AddArg(p) 
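// IsNonNil tests the pointer against itself: TEST ANDs its operands and
// p&p == p, so ZF is set exactly when p is zero and SETNE produces the
// boolean. A minimal standalone sketch of the identity:
//
//	func isNonNil(p uintptr) bool {
//		return p&p != 0 // p&p == p, so this is simply p != 0
//	}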
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (IsSliceInBounds idx len)
	// cond:
	// result: (SETBE (CMPQ idx len))
	for {
		idx := v.Args[0]
		len := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
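// Leq32F and Leq64F above swap their operands: x <= y is emitted as
// (SETGEF (UCOMISS y x)), i.e. y >= x over the reversed compare. The two
// forms agree on ordered inputs and are both false when either operand is
// NaN. leqFloatModel is a pure-Go sketch of the swap, illustrative only.
func leqFloatModel(x, y float64) bool {
	return y >= x // mirrors (SETGEF (UCOMISD y x))
}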
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
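// The Load rules above dispatch purely on the value's type: integer and
// pointer loads pick MOVB/MOVW/MOVL/MOVQ by width, floats pick MOVSS/MOVSD.
// loadOpModel sketches that dispatch in plain Go; the helper and its
// parameters are hypothetical stand-ins for the is*BitInt/isPtr/is*BitFloat
// predicates the rules actually use.
func loadOpModel(sizeInBytes int64, isFloat bool) string {
	switch {
	case isFloat && sizeInBytes == 4:
		return "MOVSSload"
	case isFloat && sizeInBytes == 8:
		return "MOVSDload"
	case sizeInBytes == 8:
		return "MOVQload" // also pointers when config.PtrSize == 8
	case sizeInBytes == 4:
		return "MOVLload" // also pointers when config.PtrSize == 4
	case sizeInBytes == 2:
		return "MOVWload"
	default:
		return "MOVBload" // 8-bit ints and booleans
	}
}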
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
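// All Lsh rules above share one trick: the hardware shift (SHLL/SHLQ) masks
// its count, so an extra SBB carry-mask term zeroes the result whenever the
// count is out of range, matching Go's shift semantics. The 8- and 16-bit
// variants compare against 32 because they shift in 32-bit registers.
// lsh64x64Model is a pure-Go sketch of the 64-bit case, illustrative only.
func lsh64x64Model(x, y uint64) uint64 {
	shifted := x << (y & 63) // what SHLQ computes by itself (count mod 64)
	var mask uint64
	if y < 64 { // CMPQconst y [64]: carry set iff y < 64 (unsigned)
		mask = ^uint64(0) // SBBQcarrymask: all ones on carry, else zero
	}
	return shifted & mask // the outer ANDQ
}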
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod16 x y)
	// cond:
	// result: (Select1 (DIVW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod32 x y)
	// cond:
	// result: (Select1 (DIVL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(types.Int32, types.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(types.UInt32, types.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod64 x y)
	// cond:
	// result: (Select1 (DIVQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(types.Int64, types.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(types.UInt64, types.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
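// Mod8 and Mod8u above widen both operands to 16 bits and reuse DIVW/DIVWU
// rather than dividing at 8-bit width; the remainder is then the low byte of
// the 16-bit result. mod8Model is an equivalent pure-Go sketch (illustrative
// only, not part of the generated rules).
func mod8Model(x, y int8) int8 {
	// mirrors (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	return int8(int16(x) % int16(y))
}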
func rewriteValueAMD64_OpMove_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Move [0] _ _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// cond:
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// cond:
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// cond:
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// cond:
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond:
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	types := &b.Func.Config.Types
	_ = types
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
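// Taken together, the Move rules above form a size ladder: exact small sizes
// get fixed MOV pairs (including the 3/5/6/7-byte split rules), a 9..15-byte
// copy is two possibly-overlapping MOVQs, an unaligned tail is peeled so the
// bulk is a multiple of 16, mid-size copies use Duff's device, and the rest
// falls through to REP MOVSQ. moveStrategyModel sketches that dispatch;
// thresholds are copied from the rule conditions, the helper is hypothetical.
func moveStrategyModel(s int64, noDuffDevice bool) string {
	switch {
	case s <= 16:
		return "fixed MOV sequence"
	case s%16 != 0:
		return "peel s%16 bytes, then Move the 16-byte-aligned rest"
	case s >= 32 && s <= 16*64 && !noDuffDevice:
		return "DUFFCOPY"
	default:
		return "REPMOVSQ" // the rule above additionally requires s%8 == 0
	}
}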
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, types.Float32)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, types.Float64)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
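// Neg32F and Neg64F above negate by XORing the sign bit: the PXOR constant
// f2i(math.Copysign(0, -1)) has only the sign bit set. neg64FModel shows the
// same operation in pure Go (illustrative only); unlike 0 - x, it also flips
// the sign of zeros and NaNs.
func neg64FModel(x float64) float64 {
	return math.Float64frombits(math.Float64bits(x) ^ (1 << 63))
}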
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	types := &b.Func.Config.Types
	_ = types
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <types.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, types.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	types := &b.Func.Config.Types
	_ = types
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <types.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, types.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
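// PopCount16 and PopCount8 above zero-extend to 32 bits and use POPCNTL, so
// a single instruction form covers the narrow widths. popCount16Model is a
// self-contained pure-Go sketch of the same computation (illustrative only).
func popCount16Model(x uint16) (n int) {
	for w := uint32(x); w != 0; w &= w - 1 { // MOVWQZX, then count set bits
		n++
	}
	return n
}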
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
v1.AddArg(v2) 39687 v.AddArg(v1) 39688 return true 39689 } 39690 } 39691 func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool { 39692 b := v.Block 39693 _ = b 39694 // match: (Rsh64Ux64 <t> x y) 39695 // cond: 39696 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 39697 for { 39698 t := v.Type 39699 x := v.Args[0] 39700 y := v.Args[1] 39701 v.reset(OpAMD64ANDQ) 39702 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 39703 v0.AddArg(x) 39704 v0.AddArg(y) 39705 v.AddArg(v0) 39706 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 39707 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 39708 v2.AuxInt = 64 39709 v2.AddArg(y) 39710 v1.AddArg(v2) 39711 v.AddArg(v1) 39712 return true 39713 } 39714 } 39715 func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool { 39716 b := v.Block 39717 _ = b 39718 // match: (Rsh64Ux8 <t> x y) 39719 // cond: 39720 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 39721 for { 39722 t := v.Type 39723 x := v.Args[0] 39724 y := v.Args[1] 39725 v.reset(OpAMD64ANDQ) 39726 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 39727 v0.AddArg(x) 39728 v0.AddArg(y) 39729 v.AddArg(v0) 39730 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 39731 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 39732 v2.AuxInt = 64 39733 v2.AddArg(y) 39734 v1.AddArg(v2) 39735 v.AddArg(v1) 39736 return true 39737 } 39738 } 39739 func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool { 39740 b := v.Block 39741 _ = b 39742 // match: (Rsh64x16 <t> x y) 39743 // cond: 39744 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 39745 for { 39746 t := v.Type 39747 x := v.Args[0] 39748 y := v.Args[1] 39749 v.reset(OpAMD64SARQ) 39750 v.Type = t 39751 v.AddArg(x) 39752 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 39753 v0.AddArg(y) 39754 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 39755 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 39756 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 39757 v3.AuxInt = 64 39758 v3.AddArg(y) 39759 v2.AddArg(v3) 39760 v1.AddArg(v2) 39761 v0.AddArg(v1) 39762 v.AddArg(v0) 39763 return true 39764 } 39765 } 39766 func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool { 39767 b := v.Block 39768 _ = b 39769 // match: (Rsh64x32 <t> x y) 39770 // cond: 39771 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 39772 for { 39773 t := v.Type 39774 x := v.Args[0] 39775 y := v.Args[1] 39776 v.reset(OpAMD64SARQ) 39777 v.Type = t 39778 v.AddArg(x) 39779 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 39780 v0.AddArg(y) 39781 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 39782 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 39783 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 39784 v3.AuxInt = 64 39785 v3.AddArg(y) 39786 v2.AddArg(v3) 39787 v1.AddArg(v2) 39788 v0.AddArg(v1) 39789 v.AddArg(v0) 39790 return true 39791 } 39792 } 39793 func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool { 39794 b := v.Block 39795 _ = b 39796 // match: (Rsh64x64 <t> x y) 39797 // cond: 39798 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 39799 for { 39800 t := v.Type 39801 x := v.Args[0] 39802 y := v.Args[1] 39803 v.reset(OpAMD64SARQ) 39804 v.Type = t 39805 v.AddArg(x) 39806 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 39807 v0.AddArg(y) 39808 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 39809 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 39810 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 39811 
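// NOTE: The signed Rsh*x* rules cannot mask the result to zero, since
// Go defines an arithmetic shift by >= the width as a sign fill. They
// instead saturate the shift count: when y >= 64 the SBBQcarrymask is
// zero, NOTQ turns it into all ones, and ORQ forces the SARQ count to
// all ones (63 after the hardware's mod-64 masking), which produces
// exactly the sign-fill result. A sketch with a hypothetical helper:
//
//	func sarqCount(y uint64) uint64 {
//		var mask uint64
//		if y < 64 {
//			mask = ^uint64(0) // SBBQcarrymask
//		}
//		return y | ^mask // y itself when y < 64, otherwise all ones
//	}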
v3.AuxInt = 64 39812 v3.AddArg(y) 39813 v2.AddArg(v3) 39814 v1.AddArg(v2) 39815 v0.AddArg(v1) 39816 v.AddArg(v0) 39817 return true 39818 } 39819 } 39820 func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool { 39821 b := v.Block 39822 _ = b 39823 // match: (Rsh64x8 <t> x y) 39824 // cond: 39825 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 39826 for { 39827 t := v.Type 39828 x := v.Args[0] 39829 y := v.Args[1] 39830 v.reset(OpAMD64SARQ) 39831 v.Type = t 39832 v.AddArg(x) 39833 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 39834 v0.AddArg(y) 39835 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 39836 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 39837 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 39838 v3.AuxInt = 64 39839 v3.AddArg(y) 39840 v2.AddArg(v3) 39841 v1.AddArg(v2) 39842 v0.AddArg(v1) 39843 v.AddArg(v0) 39844 return true 39845 } 39846 } 39847 func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { 39848 b := v.Block 39849 _ = b 39850 // match: (Rsh8Ux16 <t> x y) 39851 // cond: 39852 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 39853 for { 39854 t := v.Type 39855 x := v.Args[0] 39856 y := v.Args[1] 39857 v.reset(OpAMD64ANDL) 39858 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 39859 v0.AddArg(x) 39860 v0.AddArg(y) 39861 v.AddArg(v0) 39862 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 39863 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 39864 v2.AuxInt = 8 39865 v2.AddArg(y) 39866 v1.AddArg(v2) 39867 v.AddArg(v1) 39868 return true 39869 } 39870 } 39871 func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { 39872 b := v.Block 39873 _ = b 39874 // match: (Rsh8Ux32 <t> x y) 39875 // cond: 39876 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 39877 for { 39878 t := v.Type 39879 x := v.Args[0] 39880 y := v.Args[1] 39881 v.reset(OpAMD64ANDL) 39882 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 39883 v0.AddArg(x) 39884 v0.AddArg(y) 39885 v.AddArg(v0) 39886 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 39887 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 39888 v2.AuxInt = 8 39889 v2.AddArg(y) 39890 v1.AddArg(v2) 39891 v.AddArg(v1) 39892 return true 39893 } 39894 } 39895 func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { 39896 b := v.Block 39897 _ = b 39898 // match: (Rsh8Ux64 <t> x y) 39899 // cond: 39900 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 39901 for { 39902 t := v.Type 39903 x := v.Args[0] 39904 y := v.Args[1] 39905 v.reset(OpAMD64ANDL) 39906 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 39907 v0.AddArg(x) 39908 v0.AddArg(y) 39909 v.AddArg(v0) 39910 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 39911 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 39912 v2.AuxInt = 8 39913 v2.AddArg(y) 39914 v1.AddArg(v2) 39915 v.AddArg(v1) 39916 return true 39917 } 39918 } 39919 func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { 39920 b := v.Block 39921 _ = b 39922 // match: (Rsh8Ux8 <t> x y) 39923 // cond: 39924 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 39925 for { 39926 t := v.Type 39927 x := v.Args[0] 39928 y := v.Args[1] 39929 v.reset(OpAMD64ANDL) 39930 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 39931 v0.AddArg(x) 39932 v0.AddArg(y) 39933 v.AddArg(v0) 39934 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 39935 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 39936 v2.AuxInt = 8 39937 v2.AddArg(y) 39938 v1.AddArg(v2) 39939 v.AddArg(v1) 39940 return true 39941 } 39942 } 39943 func rewriteValueAMD64_OpRsh8x16_0(v 
*Value) bool { 39944 b := v.Block 39945 _ = b 39946 // match: (Rsh8x16 <t> x y) 39947 // cond: 39948 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 39949 for { 39950 t := v.Type 39951 x := v.Args[0] 39952 y := v.Args[1] 39953 v.reset(OpAMD64SARB) 39954 v.Type = t 39955 v.AddArg(x) 39956 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 39957 v0.AddArg(y) 39958 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 39959 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 39960 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 39961 v3.AuxInt = 8 39962 v3.AddArg(y) 39963 v2.AddArg(v3) 39964 v1.AddArg(v2) 39965 v0.AddArg(v1) 39966 v.AddArg(v0) 39967 return true 39968 } 39969 } 39970 func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { 39971 b := v.Block 39972 _ = b 39973 // match: (Rsh8x32 <t> x y) 39974 // cond: 39975 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 39976 for { 39977 t := v.Type 39978 x := v.Args[0] 39979 y := v.Args[1] 39980 v.reset(OpAMD64SARB) 39981 v.Type = t 39982 v.AddArg(x) 39983 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 39984 v0.AddArg(y) 39985 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 39986 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 39987 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 39988 v3.AuxInt = 8 39989 v3.AddArg(y) 39990 v2.AddArg(v3) 39991 v1.AddArg(v2) 39992 v0.AddArg(v1) 39993 v.AddArg(v0) 39994 return true 39995 } 39996 } 39997 func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { 39998 b := v.Block 39999 _ = b 40000 // match: (Rsh8x64 <t> x y) 40001 // cond: 40002 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 40003 for { 40004 t := v.Type 40005 x := v.Args[0] 40006 y := v.Args[1] 40007 v.reset(OpAMD64SARB) 40008 v.Type = t 40009 v.AddArg(x) 40010 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 40011 v0.AddArg(y) 40012 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 40013 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 40014 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 40015 v3.AuxInt = 8 40016 v3.AddArg(y) 40017 v2.AddArg(v3) 40018 v1.AddArg(v2) 40019 v0.AddArg(v1) 40020 v.AddArg(v0) 40021 return true 40022 } 40023 } 40024 func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { 40025 b := v.Block 40026 _ = b 40027 // match: (Rsh8x8 <t> x y) 40028 // cond: 40029 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 40030 for { 40031 t := v.Type 40032 x := v.Args[0] 40033 y := v.Args[1] 40034 v.reset(OpAMD64SARB) 40035 v.Type = t 40036 v.AddArg(x) 40037 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 40038 v0.AddArg(y) 40039 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 40040 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 40041 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 40042 v3.AuxInt = 8 40043 v3.AddArg(y) 40044 v2.AddArg(v3) 40045 v1.AddArg(v2) 40046 v0.AddArg(v1) 40047 v.AddArg(v0) 40048 return true 40049 } 40050 } 40051 func rewriteValueAMD64_OpSelect0_0(v *Value) bool { 40052 b := v.Block 40053 _ = b 40054 // match: (Select0 <t> (AddTupleFirst32 tuple val)) 40055 // cond: 40056 // result: (ADDL val (Select0 <t> tuple)) 40057 for { 40058 t := v.Type 40059 v_0 := v.Args[0] 40060 if v_0.Op != OpAMD64AddTupleFirst32 { 40061 break 40062 } 40063 tuple := v_0.Args[0] 40064 val := v_0.Args[1] 40065 v.reset(OpAMD64ADDL) 40066 v.AddArg(val) 40067 v0 := b.NewValue0(v.Pos, OpSelect0, t) 40068 v0.AddArg(tuple) 40069 
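// NOTE: AddTupleFirst32/64 is a marker op meaning "val added to the
// first element of tuple"; it is introduced when lowering atomic add
// (XADD returns the old value and the new memory state as a tuple).
// Select0 of it therefore becomes a real ADD, while Select1 below
// drops val entirely and selects from the original tuple:
//
//	Select0 (AddTupleFirst32 tuple val) -> ADDL val (Select0 tuple)
//	Select1 (AddTupleFirst32 tuple _)   -> Select1 tuple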
v.AddArg(v0) 40070 return true 40071 } 40072 // match: (Select0 <t> (AddTupleFirst64 tuple val)) 40073 // cond: 40074 // result: (ADDQ val (Select0 <t> tuple)) 40075 for { 40076 t := v.Type 40077 v_0 := v.Args[0] 40078 if v_0.Op != OpAMD64AddTupleFirst64 { 40079 break 40080 } 40081 tuple := v_0.Args[0] 40082 val := v_0.Args[1] 40083 v.reset(OpAMD64ADDQ) 40084 v.AddArg(val) 40085 v0 := b.NewValue0(v.Pos, OpSelect0, t) 40086 v0.AddArg(tuple) 40087 v.AddArg(v0) 40088 return true 40089 } 40090 return false 40091 } 40092 func rewriteValueAMD64_OpSelect1_0(v *Value) bool { 40093 // match: (Select1 (AddTupleFirst32 tuple _)) 40094 // cond: 40095 // result: (Select1 tuple) 40096 for { 40097 v_0 := v.Args[0] 40098 if v_0.Op != OpAMD64AddTupleFirst32 { 40099 break 40100 } 40101 tuple := v_0.Args[0] 40102 v.reset(OpSelect1) 40103 v.AddArg(tuple) 40104 return true 40105 } 40106 // match: (Select1 (AddTupleFirst64 tuple _)) 40107 // cond: 40108 // result: (Select1 tuple) 40109 for { 40110 v_0 := v.Args[0] 40111 if v_0.Op != OpAMD64AddTupleFirst64 { 40112 break 40113 } 40114 tuple := v_0.Args[0] 40115 v.reset(OpSelect1) 40116 v.AddArg(tuple) 40117 return true 40118 } 40119 return false 40120 } 40121 func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool { 40122 // match: (SignExt16to32 x) 40123 // cond: 40124 // result: (MOVWQSX x) 40125 for { 40126 x := v.Args[0] 40127 v.reset(OpAMD64MOVWQSX) 40128 v.AddArg(x) 40129 return true 40130 } 40131 } 40132 func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool { 40133 // match: (SignExt16to64 x) 40134 // cond: 40135 // result: (MOVWQSX x) 40136 for { 40137 x := v.Args[0] 40138 v.reset(OpAMD64MOVWQSX) 40139 v.AddArg(x) 40140 return true 40141 } 40142 } 40143 func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool { 40144 // match: (SignExt32to64 x) 40145 // cond: 40146 // result: (MOVLQSX x) 40147 for { 40148 x := v.Args[0] 40149 v.reset(OpAMD64MOVLQSX) 40150 v.AddArg(x) 40151 return true 40152 } 40153 } 40154 func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool { 40155 // match: (SignExt8to16 x) 40156 // cond: 40157 // result: (MOVBQSX x) 40158 for { 40159 x := v.Args[0] 40160 v.reset(OpAMD64MOVBQSX) 40161 v.AddArg(x) 40162 return true 40163 } 40164 } 40165 func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool { 40166 // match: (SignExt8to32 x) 40167 // cond: 40168 // result: (MOVBQSX x) 40169 for { 40170 x := v.Args[0] 40171 v.reset(OpAMD64MOVBQSX) 40172 v.AddArg(x) 40173 return true 40174 } 40175 } 40176 func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool { 40177 // match: (SignExt8to64 x) 40178 // cond: 40179 // result: (MOVBQSX x) 40180 for { 40181 x := v.Args[0] 40182 v.reset(OpAMD64MOVBQSX) 40183 v.AddArg(x) 40184 return true 40185 } 40186 } 40187 func rewriteValueAMD64_OpSlicemask_0(v *Value) bool { 40188 b := v.Block 40189 _ = b 40190 // match: (Slicemask <t> x) 40191 // cond: 40192 // result: (SARQconst (NEGQ <t> x) [63]) 40193 for { 40194 t := v.Type 40195 x := v.Args[0] 40196 v.reset(OpAMD64SARQconst) 40197 v.AuxInt = 63 40198 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 40199 v0.AddArg(x) 40200 v.AddArg(v0) 40201 return true 40202 } 40203 } 40204 func rewriteValueAMD64_OpSqrt_0(v *Value) bool { 40205 // match: (Sqrt x) 40206 // cond: 40207 // result: (SQRTSD x) 40208 for { 40209 x := v.Args[0] 40210 v.reset(OpAMD64SQRTSD) 40211 v.AddArg(x) 40212 return true 40213 } 40214 } 40215 func rewriteValueAMD64_OpStaticCall_0(v *Value) bool { 40216 // match: (StaticCall [argwid] {target} mem) 40217 // cond: 40218 // result: (CALLstatic [argwid] {target} 
mem) 40219 for { 40220 argwid := v.AuxInt 40221 target := v.Aux 40222 mem := v.Args[0] 40223 v.reset(OpAMD64CALLstatic) 40224 v.AuxInt = argwid 40225 v.Aux = target 40226 v.AddArg(mem) 40227 return true 40228 } 40229 } 40230 func rewriteValueAMD64_OpStore_0(v *Value) bool { 40231 // match: (Store {t} ptr val mem) 40232 // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type) 40233 // result: (MOVSDstore ptr val mem) 40234 for { 40235 t := v.Aux 40236 ptr := v.Args[0] 40237 val := v.Args[1] 40238 mem := v.Args[2] 40239 if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) { 40240 break 40241 } 40242 v.reset(OpAMD64MOVSDstore) 40243 v.AddArg(ptr) 40244 v.AddArg(val) 40245 v.AddArg(mem) 40246 return true 40247 } 40248 // match: (Store {t} ptr val mem) 40249 // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type) 40250 // result: (MOVSSstore ptr val mem) 40251 for { 40252 t := v.Aux 40253 ptr := v.Args[0] 40254 val := v.Args[1] 40255 mem := v.Args[2] 40256 if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) { 40257 break 40258 } 40259 v.reset(OpAMD64MOVSSstore) 40260 v.AddArg(ptr) 40261 v.AddArg(val) 40262 v.AddArg(mem) 40263 return true 40264 } 40265 // match: (Store {t} ptr val mem) 40266 // cond: t.(Type).Size() == 8 40267 // result: (MOVQstore ptr val mem) 40268 for { 40269 t := v.Aux 40270 ptr := v.Args[0] 40271 val := v.Args[1] 40272 mem := v.Args[2] 40273 if !(t.(Type).Size() == 8) { 40274 break 40275 } 40276 v.reset(OpAMD64MOVQstore) 40277 v.AddArg(ptr) 40278 v.AddArg(val) 40279 v.AddArg(mem) 40280 return true 40281 } 40282 // match: (Store {t} ptr val mem) 40283 // cond: t.(Type).Size() == 4 40284 // result: (MOVLstore ptr val mem) 40285 for { 40286 t := v.Aux 40287 ptr := v.Args[0] 40288 val := v.Args[1] 40289 mem := v.Args[2] 40290 if !(t.(Type).Size() == 4) { 40291 break 40292 } 40293 v.reset(OpAMD64MOVLstore) 40294 v.AddArg(ptr) 40295 v.AddArg(val) 40296 v.AddArg(mem) 40297 return true 40298 } 40299 // match: (Store {t} ptr val mem) 40300 // cond: t.(Type).Size() == 2 40301 // result: (MOVWstore ptr val mem) 40302 for { 40303 t := v.Aux 40304 ptr := v.Args[0] 40305 val := v.Args[1] 40306 mem := v.Args[2] 40307 if !(t.(Type).Size() == 2) { 40308 break 40309 } 40310 v.reset(OpAMD64MOVWstore) 40311 v.AddArg(ptr) 40312 v.AddArg(val) 40313 v.AddArg(mem) 40314 return true 40315 } 40316 // match: (Store {t} ptr val mem) 40317 // cond: t.(Type).Size() == 1 40318 // result: (MOVBstore ptr val mem) 40319 for { 40320 t := v.Aux 40321 ptr := v.Args[0] 40322 val := v.Args[1] 40323 mem := v.Args[2] 40324 if !(t.(Type).Size() == 1) { 40325 break 40326 } 40327 v.reset(OpAMD64MOVBstore) 40328 v.AddArg(ptr) 40329 v.AddArg(val) 40330 v.AddArg(mem) 40331 return true 40332 } 40333 return false 40334 } 40335 func rewriteValueAMD64_OpSub16_0(v *Value) bool { 40336 // match: (Sub16 x y) 40337 // cond: 40338 // result: (SUBL x y) 40339 for { 40340 x := v.Args[0] 40341 y := v.Args[1] 40342 v.reset(OpAMD64SUBL) 40343 v.AddArg(x) 40344 v.AddArg(y) 40345 return true 40346 } 40347 } 40348 func rewriteValueAMD64_OpSub32_0(v *Value) bool { 40349 // match: (Sub32 x y) 40350 // cond: 40351 // result: (SUBL x y) 40352 for { 40353 x := v.Args[0] 40354 y := v.Args[1] 40355 v.reset(OpAMD64SUBL) 40356 v.AddArg(x) 40357 v.AddArg(y) 40358 return true 40359 } 40360 } 40361 func rewriteValueAMD64_OpSub32F_0(v *Value) bool { 40362 // match: (Sub32F x y) 40363 // cond: 40364 // result: (SUBSS x y) 40365 for { 40366 x := v.Args[0] 40367 y := v.Args[1] 40368 v.reset(OpAMD64SUBSS) 40369 v.AddArg(x) 40370 v.AddArg(y) 40371 return 
true 40372 } 40373 } 40374 func rewriteValueAMD64_OpSub64_0(v *Value) bool { 40375 // match: (Sub64 x y) 40376 // cond: 40377 // result: (SUBQ x y) 40378 for { 40379 x := v.Args[0] 40380 y := v.Args[1] 40381 v.reset(OpAMD64SUBQ) 40382 v.AddArg(x) 40383 v.AddArg(y) 40384 return true 40385 } 40386 } 40387 func rewriteValueAMD64_OpSub64F_0(v *Value) bool { 40388 // match: (Sub64F x y) 40389 // cond: 40390 // result: (SUBSD x y) 40391 for { 40392 x := v.Args[0] 40393 y := v.Args[1] 40394 v.reset(OpAMD64SUBSD) 40395 v.AddArg(x) 40396 v.AddArg(y) 40397 return true 40398 } 40399 } 40400 func rewriteValueAMD64_OpSub8_0(v *Value) bool { 40401 // match: (Sub8 x y) 40402 // cond: 40403 // result: (SUBL x y) 40404 for { 40405 x := v.Args[0] 40406 y := v.Args[1] 40407 v.reset(OpAMD64SUBL) 40408 v.AddArg(x) 40409 v.AddArg(y) 40410 return true 40411 } 40412 } 40413 func rewriteValueAMD64_OpSubPtr_0(v *Value) bool { 40414 b := v.Block 40415 _ = b 40416 config := b.Func.Config 40417 _ = config 40418 // match: (SubPtr x y) 40419 // cond: config.PtrSize == 8 40420 // result: (SUBQ x y) 40421 for { 40422 x := v.Args[0] 40423 y := v.Args[1] 40424 if !(config.PtrSize == 8) { 40425 break 40426 } 40427 v.reset(OpAMD64SUBQ) 40428 v.AddArg(x) 40429 v.AddArg(y) 40430 return true 40431 } 40432 // match: (SubPtr x y) 40433 // cond: config.PtrSize == 4 40434 // result: (SUBL x y) 40435 for { 40436 x := v.Args[0] 40437 y := v.Args[1] 40438 if !(config.PtrSize == 4) { 40439 break 40440 } 40441 v.reset(OpAMD64SUBL) 40442 v.AddArg(x) 40443 v.AddArg(y) 40444 return true 40445 } 40446 return false 40447 } 40448 func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool { 40449 // match: (Trunc16to8 x) 40450 // cond: 40451 // result: x 40452 for { 40453 x := v.Args[0] 40454 v.reset(OpCopy) 40455 v.Type = x.Type 40456 v.AddArg(x) 40457 return true 40458 } 40459 } 40460 func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool { 40461 // match: (Trunc32to16 x) 40462 // cond: 40463 // result: x 40464 for { 40465 x := v.Args[0] 40466 v.reset(OpCopy) 40467 v.Type = x.Type 40468 v.AddArg(x) 40469 return true 40470 } 40471 } 40472 func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool { 40473 // match: (Trunc32to8 x) 40474 // cond: 40475 // result: x 40476 for { 40477 x := v.Args[0] 40478 v.reset(OpCopy) 40479 v.Type = x.Type 40480 v.AddArg(x) 40481 return true 40482 } 40483 } 40484 func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool { 40485 // match: (Trunc64to16 x) 40486 // cond: 40487 // result: x 40488 for { 40489 x := v.Args[0] 40490 v.reset(OpCopy) 40491 v.Type = x.Type 40492 v.AddArg(x) 40493 return true 40494 } 40495 } 40496 func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool { 40497 // match: (Trunc64to32 x) 40498 // cond: 40499 // result: x 40500 for { 40501 x := v.Args[0] 40502 v.reset(OpCopy) 40503 v.Type = x.Type 40504 v.AddArg(x) 40505 return true 40506 } 40507 } 40508 func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool { 40509 // match: (Trunc64to8 x) 40510 // cond: 40511 // result: x 40512 for { 40513 x := v.Args[0] 40514 v.reset(OpCopy) 40515 v.Type = x.Type 40516 v.AddArg(x) 40517 return true 40518 } 40519 } 40520 func rewriteValueAMD64_OpXor16_0(v *Value) bool { 40521 // match: (Xor16 x y) 40522 // cond: 40523 // result: (XORL x y) 40524 for { 40525 x := v.Args[0] 40526 y := v.Args[1] 40527 v.reset(OpAMD64XORL) 40528 v.AddArg(x) 40529 v.AddArg(y) 40530 return true 40531 } 40532 } 40533 func rewriteValueAMD64_OpXor32_0(v *Value) bool { 40534 // match: (Xor32 x y) 40535 // cond: 40536 // result: (XORL x y) 40537 for { 
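// NOTE: As with Sub8/16/32 above, Xor8, Xor16, and Xor32 all lower to
// the 32-bit XORL: sub-register values keep junk in their high bits,
// so a full 32-bit register op is both safe and shorter to encode.
// The same reasoning makes every Trunc* rule above a pure no-op copy.
//
//	Xor8  x y -> XORL x y
//	Xor16 x y -> XORL x y
//	Xor32 x y -> XORL x y
//	Xor64 x y -> XORQ x y // only the 64-bit form needs XORQ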
40538 x := v.Args[0] 40539 y := v.Args[1] 40540 v.reset(OpAMD64XORL) 40541 v.AddArg(x) 40542 v.AddArg(y) 40543 return true 40544 } 40545 } 40546 func rewriteValueAMD64_OpXor64_0(v *Value) bool { 40547 // match: (Xor64 x y) 40548 // cond: 40549 // result: (XORQ x y) 40550 for { 40551 x := v.Args[0] 40552 y := v.Args[1] 40553 v.reset(OpAMD64XORQ) 40554 v.AddArg(x) 40555 v.AddArg(y) 40556 return true 40557 } 40558 } 40559 func rewriteValueAMD64_OpXor8_0(v *Value) bool { 40560 // match: (Xor8 x y) 40561 // cond: 40562 // result: (XORL x y) 40563 for { 40564 x := v.Args[0] 40565 y := v.Args[1] 40566 v.reset(OpAMD64XORL) 40567 v.AddArg(x) 40568 v.AddArg(y) 40569 return true 40570 } 40571 } 40572 func rewriteValueAMD64_OpZero_0(v *Value) bool { 40573 b := v.Block 40574 _ = b 40575 // match: (Zero [0] _ mem) 40576 // cond: 40577 // result: mem 40578 for { 40579 if v.AuxInt != 0 { 40580 break 40581 } 40582 mem := v.Args[1] 40583 v.reset(OpCopy) 40584 v.Type = mem.Type 40585 v.AddArg(mem) 40586 return true 40587 } 40588 // match: (Zero [1] destptr mem) 40589 // cond: 40590 // result: (MOVBstoreconst [0] destptr mem) 40591 for { 40592 if v.AuxInt != 1 { 40593 break 40594 } 40595 destptr := v.Args[0] 40596 mem := v.Args[1] 40597 v.reset(OpAMD64MOVBstoreconst) 40598 v.AuxInt = 0 40599 v.AddArg(destptr) 40600 v.AddArg(mem) 40601 return true 40602 } 40603 // match: (Zero [2] destptr mem) 40604 // cond: 40605 // result: (MOVWstoreconst [0] destptr mem) 40606 for { 40607 if v.AuxInt != 2 { 40608 break 40609 } 40610 destptr := v.Args[0] 40611 mem := v.Args[1] 40612 v.reset(OpAMD64MOVWstoreconst) 40613 v.AuxInt = 0 40614 v.AddArg(destptr) 40615 v.AddArg(mem) 40616 return true 40617 } 40618 // match: (Zero [4] destptr mem) 40619 // cond: 40620 // result: (MOVLstoreconst [0] destptr mem) 40621 for { 40622 if v.AuxInt != 4 { 40623 break 40624 } 40625 destptr := v.Args[0] 40626 mem := v.Args[1] 40627 v.reset(OpAMD64MOVLstoreconst) 40628 v.AuxInt = 0 40629 v.AddArg(destptr) 40630 v.AddArg(mem) 40631 return true 40632 } 40633 // match: (Zero [8] destptr mem) 40634 // cond: 40635 // result: (MOVQstoreconst [0] destptr mem) 40636 for { 40637 if v.AuxInt != 8 { 40638 break 40639 } 40640 destptr := v.Args[0] 40641 mem := v.Args[1] 40642 v.reset(OpAMD64MOVQstoreconst) 40643 v.AuxInt = 0 40644 v.AddArg(destptr) 40645 v.AddArg(mem) 40646 return true 40647 } 40648 // match: (Zero [3] destptr mem) 40649 // cond: 40650 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 40651 for { 40652 if v.AuxInt != 3 { 40653 break 40654 } 40655 destptr := v.Args[0] 40656 mem := v.Args[1] 40657 v.reset(OpAMD64MOVBstoreconst) 40658 v.AuxInt = makeValAndOff(0, 2) 40659 v.AddArg(destptr) 40660 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, TypeMem) 40661 v0.AuxInt = 0 40662 v0.AddArg(destptr) 40663 v0.AddArg(mem) 40664 v.AddArg(v0) 40665 return true 40666 } 40667 // match: (Zero [5] destptr mem) 40668 // cond: 40669 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 40670 for { 40671 if v.AuxInt != 5 { 40672 break 40673 } 40674 destptr := v.Args[0] 40675 mem := v.Args[1] 40676 v.reset(OpAMD64MOVBstoreconst) 40677 v.AuxInt = makeValAndOff(0, 4) 40678 v.AddArg(destptr) 40679 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 40680 v0.AuxInt = 0 40681 v0.AddArg(destptr) 40682 v0.AddArg(mem) 40683 v.AddArg(v0) 40684 return true 40685 } 40686 // match: (Zero [6] destptr mem) 40687 // cond: 40688 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr 
(MOVLstoreconst [0] destptr mem)) 40689 for { 40690 if v.AuxInt != 6 { 40691 break 40692 } 40693 destptr := v.Args[0] 40694 mem := v.Args[1] 40695 v.reset(OpAMD64MOVWstoreconst) 40696 v.AuxInt = makeValAndOff(0, 4) 40697 v.AddArg(destptr) 40698 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 40699 v0.AuxInt = 0 40700 v0.AddArg(destptr) 40701 v0.AddArg(mem) 40702 v.AddArg(v0) 40703 return true 40704 } 40705 // match: (Zero [7] destptr mem) 40706 // cond: 40707 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 40708 for { 40709 if v.AuxInt != 7 { 40710 break 40711 } 40712 destptr := v.Args[0] 40713 mem := v.Args[1] 40714 v.reset(OpAMD64MOVLstoreconst) 40715 v.AuxInt = makeValAndOff(0, 3) 40716 v.AddArg(destptr) 40717 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 40718 v0.AuxInt = 0 40719 v0.AddArg(destptr) 40720 v0.AddArg(mem) 40721 v.AddArg(v0) 40722 return true 40723 } 40724 // match: (Zero [s] destptr mem) 40725 // cond: s%8 != 0 && s > 8 40726 // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem)) 40727 for { 40728 s := v.AuxInt 40729 destptr := v.Args[0] 40730 mem := v.Args[1] 40731 if !(s%8 != 0 && s > 8) { 40732 break 40733 } 40734 v.reset(OpZero) 40735 v.AuxInt = s - s%8 40736 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 40737 v0.AuxInt = s % 8 40738 v0.AddArg(destptr) 40739 v.AddArg(v0) 40740 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 40741 v1.AuxInt = 0 40742 v1.AddArg(destptr) 40743 v1.AddArg(mem) 40744 v.AddArg(v1) 40745 return true 40746 } 40747 return false 40748 } 40749 func rewriteValueAMD64_OpZero_10(v *Value) bool { 40750 b := v.Block 40751 _ = b 40752 config := b.Func.Config 40753 _ = config 40754 types := &b.Func.Config.Types 40755 _ = types 40756 // match: (Zero [16] destptr mem) 40757 // cond: 40758 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 40759 for { 40760 if v.AuxInt != 16 { 40761 break 40762 } 40763 destptr := v.Args[0] 40764 mem := v.Args[1] 40765 v.reset(OpAMD64MOVQstoreconst) 40766 v.AuxInt = makeValAndOff(0, 8) 40767 v.AddArg(destptr) 40768 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 40769 v0.AuxInt = 0 40770 v0.AddArg(destptr) 40771 v0.AddArg(mem) 40772 v.AddArg(v0) 40773 return true 40774 } 40775 // match: (Zero [24] destptr mem) 40776 // cond: 40777 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 40778 for { 40779 if v.AuxInt != 24 { 40780 break 40781 } 40782 destptr := v.Args[0] 40783 mem := v.Args[1] 40784 v.reset(OpAMD64MOVQstoreconst) 40785 v.AuxInt = makeValAndOff(0, 16) 40786 v.AddArg(destptr) 40787 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 40788 v0.AuxInt = makeValAndOff(0, 8) 40789 v0.AddArg(destptr) 40790 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 40791 v1.AuxInt = 0 40792 v1.AddArg(destptr) 40793 v1.AddArg(mem) 40794 v0.AddArg(v1) 40795 v.AddArg(v0) 40796 return true 40797 } 40798 // match: (Zero [32] destptr mem) 40799 // cond: 40800 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 40801 for { 40802 if v.AuxInt != 32 { 40803 break 40804 } 40805 destptr := v.Args[0] 40806 mem := v.Args[1] 40807 v.reset(OpAMD64MOVQstoreconst) 40808 v.AuxInt = makeValAndOff(0, 24) 40809 v.AddArg(destptr) 40810 v0 := b.NewValue0(v.Pos, 
OpAMD64MOVQstoreconst, TypeMem) 40811 v0.AuxInt = makeValAndOff(0, 16) 40812 v0.AddArg(destptr) 40813 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 40814 v1.AuxInt = makeValAndOff(0, 8) 40815 v1.AddArg(destptr) 40816 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 40817 v2.AuxInt = 0 40818 v2.AddArg(destptr) 40819 v2.AddArg(mem) 40820 v1.AddArg(v2) 40821 v0.AddArg(v1) 40822 v.AddArg(v0) 40823 return true 40824 } 40825 // match: (Zero [s] destptr mem) 40826 // cond: s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice 40827 // result: (Zero [s-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) 40828 for { 40829 s := v.AuxInt 40830 destptr := v.Args[0] 40831 mem := v.Args[1] 40832 if !(s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice) { 40833 break 40834 } 40835 v.reset(OpZero) 40836 v.AuxInt = s - 8 40837 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 40838 v0.AuxInt = 8 40839 v0.AddArg(destptr) 40840 v.AddArg(v0) 40841 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem) 40842 v1.AddArg(destptr) 40843 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) 40844 v2.AuxInt = 0 40845 v1.AddArg(v2) 40846 v1.AddArg(mem) 40847 v.AddArg(v1) 40848 return true 40849 } 40850 // match: (Zero [s] destptr mem) 40851 // cond: s <= 1024 && s%16 == 0 && !config.noDuffDevice 40852 // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem) 40853 for { 40854 s := v.AuxInt 40855 destptr := v.Args[0] 40856 mem := v.Args[1] 40857 if !(s <= 1024 && s%16 == 0 && !config.noDuffDevice) { 40858 break 40859 } 40860 v.reset(OpAMD64DUFFZERO) 40861 v.AuxInt = s 40862 v.AddArg(destptr) 40863 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, TypeInt128) 40864 v0.AuxInt = 0 40865 v.AddArg(v0) 40866 v.AddArg(mem) 40867 return true 40868 } 40869 // match: (Zero [s] destptr mem) 40870 // cond: (s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0 40871 // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) 40872 for { 40873 s := v.AuxInt 40874 destptr := v.Args[0] 40875 mem := v.Args[1] 40876 if !((s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0) { 40877 break 40878 } 40879 v.reset(OpAMD64REPSTOSQ) 40880 v.AddArg(destptr) 40881 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) 40882 v0.AuxInt = s / 8 40883 v.AddArg(v0) 40884 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) 40885 v1.AuxInt = 0 40886 v.AddArg(v1) 40887 v.AddArg(mem) 40888 return true 40889 } 40890 return false 40891 } 40892 func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool { 40893 // match: (ZeroExt16to32 x) 40894 // cond: 40895 // result: (MOVWQZX x) 40896 for { 40897 x := v.Args[0] 40898 v.reset(OpAMD64MOVWQZX) 40899 v.AddArg(x) 40900 return true 40901 } 40902 } 40903 func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool { 40904 // match: (ZeroExt16to64 x) 40905 // cond: 40906 // result: (MOVWQZX x) 40907 for { 40908 x := v.Args[0] 40909 v.reset(OpAMD64MOVWQZX) 40910 v.AddArg(x) 40911 return true 40912 } 40913 } 40914 func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool { 40915 // match: (ZeroExt32to64 x) 40916 // cond: 40917 // result: (MOVLQZX x) 40918 for { 40919 x := v.Args[0] 40920 v.reset(OpAMD64MOVLQZX) 40921 v.AddArg(x) 40922 return true 40923 } 40924 } 40925 func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool { 40926 // match: (ZeroExt8to16 x) 40927 // cond: 40928 // result: (MOVBQZX x) 40929 for { 40930 x := v.Args[0] 40931 v.reset(OpAMD64MOVBQZX) 40932 v.AddArg(x) 40933 return true 40934 } 40935 } 40936 func 
rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool { 40937 // match: (ZeroExt8to32 x) 40938 // cond: 40939 // result: (MOVBQZX x) 40940 for { 40941 x := v.Args[0] 40942 v.reset(OpAMD64MOVBQZX) 40943 v.AddArg(x) 40944 return true 40945 } 40946 } 40947 func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool { 40948 // match: (ZeroExt8to64 x) 40949 // cond: 40950 // result: (MOVBQZX x) 40951 for { 40952 x := v.Args[0] 40953 v.reset(OpAMD64MOVBQZX) 40954 v.AddArg(x) 40955 return true 40956 } 40957 } 40958 func rewriteBlockAMD64(b *Block) bool { 40959 config := b.Func.Config 40960 _ = config 40961 fe := b.Func.fe 40962 _ = fe 40963 types := &config.Types 40964 _ = types 40965 switch b.Kind { 40966 case BlockAMD64EQ: 40967 // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) 40968 // cond: !config.nacl 40969 // result: (UGE (BTL x y)) 40970 for { 40971 v := b.Control 40972 if v.Op != OpAMD64TESTL { 40973 break 40974 } 40975 v_0 := v.Args[0] 40976 if v_0.Op != OpAMD64SHLL { 40977 break 40978 } 40979 v_0_0 := v_0.Args[0] 40980 if v_0_0.Op != OpAMD64MOVLconst { 40981 break 40982 } 40983 if v_0_0.AuxInt != 1 { 40984 break 40985 } 40986 x := v_0.Args[1] 40987 y := v.Args[1] 40988 if !(!config.nacl) { 40989 break 40990 } 40991 b.Kind = BlockAMD64UGE 40992 v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) 40993 v0.AddArg(x) 40994 v0.AddArg(y) 40995 b.SetControl(v0) 40996 return true 40997 } 40998 // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) 40999 // cond: !config.nacl 41000 // result: (UGE (BTL x y)) 41001 for { 41002 v := b.Control 41003 if v.Op != OpAMD64TESTL { 41004 break 41005 } 41006 y := v.Args[0] 41007 v_1 := v.Args[1] 41008 if v_1.Op != OpAMD64SHLL { 41009 break 41010 } 41011 v_1_0 := v_1.Args[0] 41012 if v_1_0.Op != OpAMD64MOVLconst { 41013 break 41014 } 41015 if v_1_0.AuxInt != 1 { 41016 break 41017 } 41018 x := v_1.Args[1] 41019 if !(!config.nacl) { 41020 break 41021 } 41022 b.Kind = BlockAMD64UGE 41023 v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) 41024 v0.AddArg(x) 41025 v0.AddArg(y) 41026 b.SetControl(v0) 41027 return true 41028 } 41029 // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 41030 // cond: !config.nacl 41031 // result: (UGE (BTQ x y)) 41032 for { 41033 v := b.Control 41034 if v.Op != OpAMD64TESTQ { 41035 break 41036 } 41037 v_0 := v.Args[0] 41038 if v_0.Op != OpAMD64SHLQ { 41039 break 41040 } 41041 v_0_0 := v_0.Args[0] 41042 if v_0_0.Op != OpAMD64MOVQconst { 41043 break 41044 } 41045 if v_0_0.AuxInt != 1 { 41046 break 41047 } 41048 x := v_0.Args[1] 41049 y := v.Args[1] 41050 if !(!config.nacl) { 41051 break 41052 } 41053 b.Kind = BlockAMD64UGE 41054 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) 41055 v0.AddArg(x) 41056 v0.AddArg(y) 41057 b.SetControl(v0) 41058 return true 41059 } 41060 // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 41061 // cond: !config.nacl 41062 // result: (UGE (BTQ x y)) 41063 for { 41064 v := b.Control 41065 if v.Op != OpAMD64TESTQ { 41066 break 41067 } 41068 y := v.Args[0] 41069 v_1 := v.Args[1] 41070 if v_1.Op != OpAMD64SHLQ { 41071 break 41072 } 41073 v_1_0 := v_1.Args[0] 41074 if v_1_0.Op != OpAMD64MOVQconst { 41075 break 41076 } 41077 if v_1_0.AuxInt != 1 { 41078 break 41079 } 41080 x := v_1.Args[1] 41081 if !(!config.nacl) { 41082 break 41083 } 41084 b.Kind = BlockAMD64UGE 41085 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) 41086 v0.AddArg(x) 41087 v0.AddArg(y) 41088 b.SetControl(v0) 41089 return true 41090 } 41091 // match: (EQ (TESTLconst [c] x)) 41092 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 41093 // result: (UGE (BTLconst 
[log2(c)] x)) 41094 for { 41095 v := b.Control 41096 if v.Op != OpAMD64TESTLconst { 41097 break 41098 } 41099 c := v.AuxInt 41100 x := v.Args[0] 41101 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 41102 break 41103 } 41104 b.Kind = BlockAMD64UGE 41105 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags) 41106 v0.AuxInt = log2(c) 41107 v0.AddArg(x) 41108 b.SetControl(v0) 41109 return true 41110 } 41111 // match: (EQ (TESTQconst [c] x)) 41112 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 41113 // result: (UGE (BTQconst [log2(c)] x)) 41114 for { 41115 v := b.Control 41116 if v.Op != OpAMD64TESTQconst { 41117 break 41118 } 41119 c := v.AuxInt 41120 x := v.Args[0] 41121 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 41122 break 41123 } 41124 b.Kind = BlockAMD64UGE 41125 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 41126 v0.AuxInt = log2(c) 41127 v0.AddArg(x) 41128 b.SetControl(v0) 41129 return true 41130 } 41131 // match: (EQ (TESTQ (MOVQconst [c]) x)) 41132 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 41133 // result: (UGE (BTQconst [log2(c)] x)) 41134 for { 41135 v := b.Control 41136 if v.Op != OpAMD64TESTQ { 41137 break 41138 } 41139 v_0 := v.Args[0] 41140 if v_0.Op != OpAMD64MOVQconst { 41141 break 41142 } 41143 c := v_0.AuxInt 41144 x := v.Args[1] 41145 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 41146 break 41147 } 41148 b.Kind = BlockAMD64UGE 41149 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 41150 v0.AuxInt = log2(c) 41151 v0.AddArg(x) 41152 b.SetControl(v0) 41153 return true 41154 } 41155 // match: (EQ (TESTQ x (MOVQconst [c]))) 41156 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 41157 // result: (UGE (BTQconst [log2(c)] x)) 41158 for { 41159 v := b.Control 41160 if v.Op != OpAMD64TESTQ { 41161 break 41162 } 41163 x := v.Args[0] 41164 v_1 := v.Args[1] 41165 if v_1.Op != OpAMD64MOVQconst { 41166 break 41167 } 41168 c := v_1.AuxInt 41169 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 41170 break 41171 } 41172 b.Kind = BlockAMD64UGE 41173 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) 41174 v0.AuxInt = log2(c) 41175 v0.AddArg(x) 41176 b.SetControl(v0) 41177 return true 41178 } 41179 // match: (EQ (InvertFlags cmp) yes no) 41180 // cond: 41181 // result: (EQ cmp yes no) 41182 for { 41183 v := b.Control 41184 if v.Op != OpAMD64InvertFlags { 41185 break 41186 } 41187 cmp := v.Args[0] 41188 b.Kind = BlockAMD64EQ 41189 b.SetControl(cmp) 41190 return true 41191 } 41192 // match: (EQ (FlagEQ) yes no) 41193 // cond: 41194 // result: (First nil yes no) 41195 for { 41196 v := b.Control 41197 if v.Op != OpAMD64FlagEQ { 41198 break 41199 } 41200 b.Kind = BlockFirst 41201 b.SetControl(nil) 41202 return true 41203 } 41204 // match: (EQ (FlagLT_ULT) yes no) 41205 // cond: 41206 // result: (First nil no yes) 41207 for { 41208 v := b.Control 41209 if v.Op != OpAMD64FlagLT_ULT { 41210 break 41211 } 41212 b.Kind = BlockFirst 41213 b.SetControl(nil) 41214 b.swapSuccessors() 41215 return true 41216 } 41217 // match: (EQ (FlagLT_UGT) yes no) 41218 // cond: 41219 // result: (First nil no yes) 41220 for { 41221 v := b.Control 41222 if v.Op != OpAMD64FlagLT_UGT { 41223 break 41224 } 41225 b.Kind = BlockFirst 41226 b.SetControl(nil) 41227 b.swapSuccessors() 41228 return true 41229 } 41230 // match: (EQ (FlagGT_ULT) yes no) 41231 // cond: 41232 // result: (First nil no yes) 41233 for { 41234 v := b.Control 41235 if v.Op != OpAMD64FlagGT_ULT { 41236 break 41237 } 41238 b.Kind = BlockFirst 41239 b.SetControl(nil) 41240 
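// NOTE: The Flag* control values are comparison results known at
// compile time (FlagGT_ULT, for instance, means "greater as signed,
// less as unsigned"). A conditional block whose control is such a
// constant folds to BlockFirst, which always takes its first
// successor; swapSuccessors is how a never-taken branch is expressed:
//
//	EQ (FlagEQ)     yes no -> First nil yes no // always branches to yes
//	EQ (FlagGT_ULT) yes no -> First nil no yes // edges swapped: yes is dead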
b.swapSuccessors() 41241 return true 41242 } 41243 // match: (EQ (FlagGT_UGT) yes no) 41244 // cond: 41245 // result: (First nil no yes) 41246 for { 41247 v := b.Control 41248 if v.Op != OpAMD64FlagGT_UGT { 41249 break 41250 } 41251 b.Kind = BlockFirst 41252 b.SetControl(nil) 41253 b.swapSuccessors() 41254 return true 41255 } 41256 case BlockAMD64GE: 41257 // match: (GE (InvertFlags cmp) yes no) 41258 // cond: 41259 // result: (LE cmp yes no) 41260 for { 41261 v := b.Control 41262 if v.Op != OpAMD64InvertFlags { 41263 break 41264 } 41265 cmp := v.Args[0] 41266 b.Kind = BlockAMD64LE 41267 b.SetControl(cmp) 41268 return true 41269 } 41270 // match: (GE (FlagEQ) yes no) 41271 // cond: 41272 // result: (First nil yes no) 41273 for { 41274 v := b.Control 41275 if v.Op != OpAMD64FlagEQ { 41276 break 41277 } 41278 b.Kind = BlockFirst 41279 b.SetControl(nil) 41280 return true 41281 } 41282 // match: (GE (FlagLT_ULT) yes no) 41283 // cond: 41284 // result: (First nil no yes) 41285 for { 41286 v := b.Control 41287 if v.Op != OpAMD64FlagLT_ULT { 41288 break 41289 } 41290 b.Kind = BlockFirst 41291 b.SetControl(nil) 41292 b.swapSuccessors() 41293 return true 41294 } 41295 // match: (GE (FlagLT_UGT) yes no) 41296 // cond: 41297 // result: (First nil no yes) 41298 for { 41299 v := b.Control 41300 if v.Op != OpAMD64FlagLT_UGT { 41301 break 41302 } 41303 b.Kind = BlockFirst 41304 b.SetControl(nil) 41305 b.swapSuccessors() 41306 return true 41307 } 41308 // match: (GE (FlagGT_ULT) yes no) 41309 // cond: 41310 // result: (First nil yes no) 41311 for { 41312 v := b.Control 41313 if v.Op != OpAMD64FlagGT_ULT { 41314 break 41315 } 41316 b.Kind = BlockFirst 41317 b.SetControl(nil) 41318 return true 41319 } 41320 // match: (GE (FlagGT_UGT) yes no) 41321 // cond: 41322 // result: (First nil yes no) 41323 for { 41324 v := b.Control 41325 if v.Op != OpAMD64FlagGT_UGT { 41326 break 41327 } 41328 b.Kind = BlockFirst 41329 b.SetControl(nil) 41330 return true 41331 } 41332 case BlockAMD64GT: 41333 // match: (GT (InvertFlags cmp) yes no) 41334 // cond: 41335 // result: (LT cmp yes no) 41336 for { 41337 v := b.Control 41338 if v.Op != OpAMD64InvertFlags { 41339 break 41340 } 41341 cmp := v.Args[0] 41342 b.Kind = BlockAMD64LT 41343 b.SetControl(cmp) 41344 return true 41345 } 41346 // match: (GT (FlagEQ) yes no) 41347 // cond: 41348 // result: (First nil no yes) 41349 for { 41350 v := b.Control 41351 if v.Op != OpAMD64FlagEQ { 41352 break 41353 } 41354 b.Kind = BlockFirst 41355 b.SetControl(nil) 41356 b.swapSuccessors() 41357 return true 41358 } 41359 // match: (GT (FlagLT_ULT) yes no) 41360 // cond: 41361 // result: (First nil no yes) 41362 for { 41363 v := b.Control 41364 if v.Op != OpAMD64FlagLT_ULT { 41365 break 41366 } 41367 b.Kind = BlockFirst 41368 b.SetControl(nil) 41369 b.swapSuccessors() 41370 return true 41371 } 41372 // match: (GT (FlagLT_UGT) yes no) 41373 // cond: 41374 // result: (First nil no yes) 41375 for { 41376 v := b.Control 41377 if v.Op != OpAMD64FlagLT_UGT { 41378 break 41379 } 41380 b.Kind = BlockFirst 41381 b.SetControl(nil) 41382 b.swapSuccessors() 41383 return true 41384 } 41385 // match: (GT (FlagGT_ULT) yes no) 41386 // cond: 41387 // result: (First nil yes no) 41388 for { 41389 v := b.Control 41390 if v.Op != OpAMD64FlagGT_ULT { 41391 break 41392 } 41393 b.Kind = BlockFirst 41394 b.SetControl(nil) 41395 return true 41396 } 41397 // match: (GT (FlagGT_UGT) yes no) 41398 // cond: 41399 // result: (First nil yes no) 41400 for { 41401 v := b.Control 41402 if v.Op != OpAMD64FlagGT_UGT { 41403 break 
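// NOTE: InvertFlags marks a comparison whose operands were swapped, so
// each block kind rewrites to its mirror rather than its negation; the
// successor edges are left alone, only the sense of the test changes:
//
//	GT (InvertFlags cmp) -> LT cmp
//	GE (InvertFlags cmp) -> LE cmp
//	LE (InvertFlags cmp) -> GE cmp
//	LT (InvertFlags cmp) -> GT cmp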
41404 } 41405 b.Kind = BlockFirst 41406 b.SetControl(nil) 41407 return true 41408 } 41409 case BlockIf: 41410 // match: (If (SETL cmp) yes no) 41411 // cond: 41412 // result: (LT cmp yes no) 41413 for { 41414 v := b.Control 41415 if v.Op != OpAMD64SETL { 41416 break 41417 } 41418 cmp := v.Args[0] 41419 b.Kind = BlockAMD64LT 41420 b.SetControl(cmp) 41421 return true 41422 } 41423 // match: (If (SETLE cmp) yes no) 41424 // cond: 41425 // result: (LE cmp yes no) 41426 for { 41427 v := b.Control 41428 if v.Op != OpAMD64SETLE { 41429 break 41430 } 41431 cmp := v.Args[0] 41432 b.Kind = BlockAMD64LE 41433 b.SetControl(cmp) 41434 return true 41435 } 41436 // match: (If (SETG cmp) yes no) 41437 // cond: 41438 // result: (GT cmp yes no) 41439 for { 41440 v := b.Control 41441 if v.Op != OpAMD64SETG { 41442 break 41443 } 41444 cmp := v.Args[0] 41445 b.Kind = BlockAMD64GT 41446 b.SetControl(cmp) 41447 return true 41448 } 41449 // match: (If (SETGE cmp) yes no) 41450 // cond: 41451 // result: (GE cmp yes no) 41452 for { 41453 v := b.Control 41454 if v.Op != OpAMD64SETGE { 41455 break 41456 } 41457 cmp := v.Args[0] 41458 b.Kind = BlockAMD64GE 41459 b.SetControl(cmp) 41460 return true 41461 } 41462 // match: (If (SETEQ cmp) yes no) 41463 // cond: 41464 // result: (EQ cmp yes no) 41465 for { 41466 v := b.Control 41467 if v.Op != OpAMD64SETEQ { 41468 break 41469 } 41470 cmp := v.Args[0] 41471 b.Kind = BlockAMD64EQ 41472 b.SetControl(cmp) 41473 return true 41474 } 41475 // match: (If (SETNE cmp) yes no) 41476 // cond: 41477 // result: (NE cmp yes no) 41478 for { 41479 v := b.Control 41480 if v.Op != OpAMD64SETNE { 41481 break 41482 } 41483 cmp := v.Args[0] 41484 b.Kind = BlockAMD64NE 41485 b.SetControl(cmp) 41486 return true 41487 } 41488 // match: (If (SETB cmp) yes no) 41489 // cond: 41490 // result: (ULT cmp yes no) 41491 for { 41492 v := b.Control 41493 if v.Op != OpAMD64SETB { 41494 break 41495 } 41496 cmp := v.Args[0] 41497 b.Kind = BlockAMD64ULT 41498 b.SetControl(cmp) 41499 return true 41500 } 41501 // match: (If (SETBE cmp) yes no) 41502 // cond: 41503 // result: (ULE cmp yes no) 41504 for { 41505 v := b.Control 41506 if v.Op != OpAMD64SETBE { 41507 break 41508 } 41509 cmp := v.Args[0] 41510 b.Kind = BlockAMD64ULE 41511 b.SetControl(cmp) 41512 return true 41513 } 41514 // match: (If (SETA cmp) yes no) 41515 // cond: 41516 // result: (UGT cmp yes no) 41517 for { 41518 v := b.Control 41519 if v.Op != OpAMD64SETA { 41520 break 41521 } 41522 cmp := v.Args[0] 41523 b.Kind = BlockAMD64UGT 41524 b.SetControl(cmp) 41525 return true 41526 } 41527 // match: (If (SETAE cmp) yes no) 41528 // cond: 41529 // result: (UGE cmp yes no) 41530 for { 41531 v := b.Control 41532 if v.Op != OpAMD64SETAE { 41533 break 41534 } 41535 cmp := v.Args[0] 41536 b.Kind = BlockAMD64UGE 41537 b.SetControl(cmp) 41538 return true 41539 } 41540 // match: (If (SETGF cmp) yes no) 41541 // cond: 41542 // result: (UGT cmp yes no) 41543 for { 41544 v := b.Control 41545 if v.Op != OpAMD64SETGF { 41546 break 41547 } 41548 cmp := v.Args[0] 41549 b.Kind = BlockAMD64UGT 41550 b.SetControl(cmp) 41551 return true 41552 } 41553 // match: (If (SETGEF cmp) yes no) 41554 // cond: 41555 // result: (UGE cmp yes no) 41556 for { 41557 v := b.Control 41558 if v.Op != OpAMD64SETGEF { 41559 break 41560 } 41561 cmp := v.Args[0] 41562 b.Kind = BlockAMD64UGE 41563 b.SetControl(cmp) 41564 return true 41565 } 41566 // match: (If (SETEQF cmp) yes no) 41567 // cond: 41568 // result: (EQF cmp yes no) 41569 for { 41570 v := b.Control 41571 if v.Op != OpAMD64SETEQF { 
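// NOTE: The BlockIf rules peel a SETxx off the control value so the
// branch tests the CPU flags directly instead of a materialized
// boolean. Floating-point conditions map to the unsigned block kinds
// (UCOMISS/UCOMISD set the flags like an unsigned compare), and FP
// equality needs the dedicated EQF/NEF kinds because a NaN parity
// check is involved. Any control value that is not a SETxx falls
// through to the generic rule at the end of this case,
//
//	If cond yes no -> NE (TESTB cond cond) yes no
//
// which the (NE (TESTB (SETxx cmp) (SETxx cmp))) rules in the NE case
// below then undo whenever the boolean did come from a SETxx.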
41572 break 41573 } 41574 cmp := v.Args[0] 41575 b.Kind = BlockAMD64EQF 41576 b.SetControl(cmp) 41577 return true 41578 } 41579 // match: (If (SETNEF cmp) yes no) 41580 // cond: 41581 // result: (NEF cmp yes no) 41582 for { 41583 v := b.Control 41584 if v.Op != OpAMD64SETNEF { 41585 break 41586 } 41587 cmp := v.Args[0] 41588 b.Kind = BlockAMD64NEF 41589 b.SetControl(cmp) 41590 return true 41591 } 41592 // match: (If cond yes no) 41593 // cond: 41594 // result: (NE (TESTB cond cond) yes no) 41595 for { 41596 v := b.Control 41597 _ = v 41598 cond := b.Control 41599 b.Kind = BlockAMD64NE 41600 v0 := b.NewValue0(v.Pos, OpAMD64TESTB, TypeFlags) 41601 v0.AddArg(cond) 41602 v0.AddArg(cond) 41603 b.SetControl(v0) 41604 return true 41605 } 41606 case BlockAMD64LE: 41607 // match: (LE (InvertFlags cmp) yes no) 41608 // cond: 41609 // result: (GE cmp yes no) 41610 for { 41611 v := b.Control 41612 if v.Op != OpAMD64InvertFlags { 41613 break 41614 } 41615 cmp := v.Args[0] 41616 b.Kind = BlockAMD64GE 41617 b.SetControl(cmp) 41618 return true 41619 } 41620 // match: (LE (FlagEQ) yes no) 41621 // cond: 41622 // result: (First nil yes no) 41623 for { 41624 v := b.Control 41625 if v.Op != OpAMD64FlagEQ { 41626 break 41627 } 41628 b.Kind = BlockFirst 41629 b.SetControl(nil) 41630 return true 41631 } 41632 // match: (LE (FlagLT_ULT) yes no) 41633 // cond: 41634 // result: (First nil yes no) 41635 for { 41636 v := b.Control 41637 if v.Op != OpAMD64FlagLT_ULT { 41638 break 41639 } 41640 b.Kind = BlockFirst 41641 b.SetControl(nil) 41642 return true 41643 } 41644 // match: (LE (FlagLT_UGT) yes no) 41645 // cond: 41646 // result: (First nil yes no) 41647 for { 41648 v := b.Control 41649 if v.Op != OpAMD64FlagLT_UGT { 41650 break 41651 } 41652 b.Kind = BlockFirst 41653 b.SetControl(nil) 41654 return true 41655 } 41656 // match: (LE (FlagGT_ULT) yes no) 41657 // cond: 41658 // result: (First nil no yes) 41659 for { 41660 v := b.Control 41661 if v.Op != OpAMD64FlagGT_ULT { 41662 break 41663 } 41664 b.Kind = BlockFirst 41665 b.SetControl(nil) 41666 b.swapSuccessors() 41667 return true 41668 } 41669 // match: (LE (FlagGT_UGT) yes no) 41670 // cond: 41671 // result: (First nil no yes) 41672 for { 41673 v := b.Control 41674 if v.Op != OpAMD64FlagGT_UGT { 41675 break 41676 } 41677 b.Kind = BlockFirst 41678 b.SetControl(nil) 41679 b.swapSuccessors() 41680 return true 41681 } 41682 case BlockAMD64LT: 41683 // match: (LT (InvertFlags cmp) yes no) 41684 // cond: 41685 // result: (GT cmp yes no) 41686 for { 41687 v := b.Control 41688 if v.Op != OpAMD64InvertFlags { 41689 break 41690 } 41691 cmp := v.Args[0] 41692 b.Kind = BlockAMD64GT 41693 b.SetControl(cmp) 41694 return true 41695 } 41696 // match: (LT (FlagEQ) yes no) 41697 // cond: 41698 // result: (First nil no yes) 41699 for { 41700 v := b.Control 41701 if v.Op != OpAMD64FlagEQ { 41702 break 41703 } 41704 b.Kind = BlockFirst 41705 b.SetControl(nil) 41706 b.swapSuccessors() 41707 return true 41708 } 41709 // match: (LT (FlagLT_ULT) yes no) 41710 // cond: 41711 // result: (First nil yes no) 41712 for { 41713 v := b.Control 41714 if v.Op != OpAMD64FlagLT_ULT { 41715 break 41716 } 41717 b.Kind = BlockFirst 41718 b.SetControl(nil) 41719 return true 41720 } 41721 // match: (LT (FlagLT_UGT) yes no) 41722 // cond: 41723 // result: (First nil yes no) 41724 for { 41725 v := b.Control 41726 if v.Op != OpAMD64FlagLT_UGT { 41727 break 41728 } 41729 b.Kind = BlockFirst 41730 b.SetControl(nil) 41731 return true 41732 } 41733 // match: (LT (FlagGT_ULT) yes no) 41734 // cond: 41735 // 
result: (First nil no yes) 41736 for { 41737 v := b.Control 41738 if v.Op != OpAMD64FlagGT_ULT { 41739 break 41740 } 41741 b.Kind = BlockFirst 41742 b.SetControl(nil) 41743 b.swapSuccessors() 41744 return true 41745 } 41746 // match: (LT (FlagGT_UGT) yes no) 41747 // cond: 41748 // result: (First nil no yes) 41749 for { 41750 v := b.Control 41751 if v.Op != OpAMD64FlagGT_UGT { 41752 break 41753 } 41754 b.Kind = BlockFirst 41755 b.SetControl(nil) 41756 b.swapSuccessors() 41757 return true 41758 } 41759 case BlockAMD64NE: 41760 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 41761 // cond: 41762 // result: (LT cmp yes no) 41763 for { 41764 v := b.Control 41765 if v.Op != OpAMD64TESTB { 41766 break 41767 } 41768 v_0 := v.Args[0] 41769 if v_0.Op != OpAMD64SETL { 41770 break 41771 } 41772 cmp := v_0.Args[0] 41773 v_1 := v.Args[1] 41774 if v_1.Op != OpAMD64SETL { 41775 break 41776 } 41777 if cmp != v_1.Args[0] { 41778 break 41779 } 41780 b.Kind = BlockAMD64LT 41781 b.SetControl(cmp) 41782 return true 41783 } 41784 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 41785 // cond: 41786 // result: (LT cmp yes no) 41787 for { 41788 v := b.Control 41789 if v.Op != OpAMD64TESTB { 41790 break 41791 } 41792 v_0 := v.Args[0] 41793 if v_0.Op != OpAMD64SETL { 41794 break 41795 } 41796 cmp := v_0.Args[0] 41797 v_1 := v.Args[1] 41798 if v_1.Op != OpAMD64SETL { 41799 break 41800 } 41801 if cmp != v_1.Args[0] { 41802 break 41803 } 41804 b.Kind = BlockAMD64LT 41805 b.SetControl(cmp) 41806 return true 41807 } 41808 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 41809 // cond: 41810 // result: (LE cmp yes no) 41811 for { 41812 v := b.Control 41813 if v.Op != OpAMD64TESTB { 41814 break 41815 } 41816 v_0 := v.Args[0] 41817 if v_0.Op != OpAMD64SETLE { 41818 break 41819 } 41820 cmp := v_0.Args[0] 41821 v_1 := v.Args[1] 41822 if v_1.Op != OpAMD64SETLE { 41823 break 41824 } 41825 if cmp != v_1.Args[0] { 41826 break 41827 } 41828 b.Kind = BlockAMD64LE 41829 b.SetControl(cmp) 41830 return true 41831 } 41832 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 41833 // cond: 41834 // result: (LE cmp yes no) 41835 for { 41836 v := b.Control 41837 if v.Op != OpAMD64TESTB { 41838 break 41839 } 41840 v_0 := v.Args[0] 41841 if v_0.Op != OpAMD64SETLE { 41842 break 41843 } 41844 cmp := v_0.Args[0] 41845 v_1 := v.Args[1] 41846 if v_1.Op != OpAMD64SETLE { 41847 break 41848 } 41849 if cmp != v_1.Args[0] { 41850 break 41851 } 41852 b.Kind = BlockAMD64LE 41853 b.SetControl(cmp) 41854 return true 41855 } 41856 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 41857 // cond: 41858 // result: (GT cmp yes no) 41859 for { 41860 v := b.Control 41861 if v.Op != OpAMD64TESTB { 41862 break 41863 } 41864 v_0 := v.Args[0] 41865 if v_0.Op != OpAMD64SETG { 41866 break 41867 } 41868 cmp := v_0.Args[0] 41869 v_1 := v.Args[1] 41870 if v_1.Op != OpAMD64SETG { 41871 break 41872 } 41873 if cmp != v_1.Args[0] { 41874 break 41875 } 41876 b.Kind = BlockAMD64GT 41877 b.SetControl(cmp) 41878 return true 41879 } 41880 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 41881 // cond: 41882 // result: (GT cmp yes no) 41883 for { 41884 v := b.Control 41885 if v.Op != OpAMD64TESTB { 41886 break 41887 } 41888 v_0 := v.Args[0] 41889 if v_0.Op != OpAMD64SETG { 41890 break 41891 } 41892 cmp := v_0.Args[0] 41893 v_1 := v.Args[1] 41894 if v_1.Op != OpAMD64SETG { 41895 break 41896 } 41897 if cmp != v_1.Args[0] { 41898 break 41899 } 41900 b.Kind = BlockAMD64GT 41901 b.SetControl(cmp) 41902 return true 41903 } 41904 // match: (NE (TESTB (SETGE cmp) 
(SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			return true
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (ULT (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			return true
		}
	}
	return false
}
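
// exampleRewriteNEGE is a minimal hand-written sketch of the first NE rule
// above, (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE cmp yes no);
// the function name is illustrative only and is not part of the generated
// rewrite set. A TESTB of two identical SETGE results is nonzero exactly
// when the signed greater-or-equal condition holds, so the block can branch
// on the flags produced by cmp directly and drop the materialized byte.
func exampleRewriteNEGE(b *Block) bool {
	v := b.Control
	if v == nil || v.Op != OpAMD64TESTB {
		return false
	}
	v_0, v_1 := v.Args[0], v.Args[1]
	if v_0.Op != OpAMD64SETGE || v_1.Op != OpAMD64SETGE {
		return false
	}
	cmp := v_0.Args[0]
	if cmp != v_1.Args[0] {
		return false // the two SETGE ops must consume the same flags
	}
	b.Kind = BlockAMD64GE // branch on the comparison's flags directly
	b.SetControl(cmp)     // the SETGE values become dead and are removed later
	return true
}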